From mboxrd@z Thu Jan  1 00:00:00 1970
From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Reply-To: gentoo-dev@lists.gentoo.org, "Mike Pagano" <mpagano@gentoo.org>
Message-ID: <1573050437.5c57d019c84b891679f956769332a0a993c75e2d.mpagano@gentoo>
Subject: [gentoo-commits] proj/linux-patches:5.3 commit in: /
X-VCS-Repository: proj/linux-patches
X-VCS-Files: 0000_README 1008_linux-5.3.9.patch
X-VCS-Directories: /
X-VCS-Committer: mpagano
X-VCS-Committer-Name: Mike Pagano
X-VCS-Revision: 5c57d019c84b891679f956769332a0a993c75e2d
X-VCS-Branch: 5.3
Date: Wed, 6 Nov 2019 14:27:32 +0000 (UTC)
List-Id: Gentoo Linux mail

commit:     5c57d019c84b891679f956769332a0a993c75e2d
Author:     Mike Pagano <mpagano@gentoo.org>
AuthorDate: Wed Nov  6 14:27:17 2019 +0000
Commit:     Mike Pagano <mpagano@gentoo.org>
CommitDate: Wed Nov  6 14:27:17 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5c57d019

Linux patch 5.3.9

Signed-off-by: Mike Pagano <mpagano@gentoo.org>

 0000_README            |    4 +
 1008_linux-5.3.9.patch | 6974 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6978 insertions(+)

diff --git a/0000_README b/0000_README
index bc9694a..c1a5896 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1007_linux-5.3.8.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.3.8
 
+Patch:  1008_linux-5.3.9.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.3.9
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
diff --git a/1008_linux-5.3.9.patch b/1008_linux-5.3.9.patch
new file mode 100644
index 0000000..6e9eabf
--- /dev/null
+++ b/1008_linux-5.3.9.patch
@@ -0,0 +1,6974 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 4c1971960afa..5ea005c9e2d6 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5267,6 +5267,10 @@
+ 			the unplug protocol
+ 			never -- do not unplug even if version check succeeds
+ 
++	xen_legacy_crash	[X86,XEN]
++			Crash from Xen panic notifier, without executing late
++			panic() code such as dumping handler.
++
+ 	xen_nopvspin	[X86,XEN]
+ 			Disables the ticketlock slowpath using Xen PV
+ 			optimizations.
+diff --git a/Documentation/scheduler/sched-bwc.rst b/Documentation/scheduler/sched-bwc.rst
+index 3a9064219656..9801d6b284b1 100644
+--- a/Documentation/scheduler/sched-bwc.rst
++++ b/Documentation/scheduler/sched-bwc.rst
+@@ -9,15 +9,16 @@ CFS bandwidth control is a CONFIG_FAIR_GROUP_SCHED extension which allows the
+ specification of the maximum CPU bandwidth available to a group or hierarchy.
+ 
+ The bandwidth allowed for a group is specified using a quota and period. Within
+-each given "period" (microseconds), a group is allowed to consume only up to
+-"quota" microseconds of CPU time.  When the CPU bandwidth consumption of a
+-group exceeds this limit (for that period), the tasks belonging to its
+-hierarchy will be throttled and are not allowed to run again until the next
+-period.
+-
+-A group's unused runtime is globally tracked, being refreshed with quota units
+-above at each period boundary.  As threads consume this bandwidth it is
+-transferred to cpu-local "silos" on a demand basis.  The amount transferred
++each given "period" (microseconds), a task group is allocated up to "quota"
++microseconds of CPU time. That quota is assigned to per-cpu run queues in
++slices as threads in the cgroup become runnable. Once all quota has been
++assigned any additional requests for quota will result in those threads being
++throttled. Throttled threads will not be able to run again until the next
++period when the quota is replenished.
++
++A group's unassigned quota is globally tracked, being refreshed back to
++cfs_quota units at each period boundary. As threads consume this bandwidth it
++is transferred to cpu-local "silos" on a demand basis. The amount transferred
+ within each of these updates is tunable and described as the "slice".
+ 
+ Management
+@@ -35,12 +36,12 @@ The default values are::
+ 
+ A value of -1 for cpu.cfs_quota_us indicates that the group does not have any
+ bandwidth restriction in place, such a group is described as an unconstrained
+-bandwidth group.  This represents the traditional work-conserving behavior for
++bandwidth group. This represents the traditional work-conserving behavior for
+ CFS.
+ 
+ Writing any (valid) positive value(s) will enact the specified bandwidth limit.
+-The minimum quota allowed for the quota or period is 1ms.  There is also an
+-upper bound on the period length of 1s.  Additional restrictions exist when
++The minimum quota allowed for the quota or period is 1ms. There is also an
++upper bound on the period length of 1s. Additional restrictions exist when
+ bandwidth limits are used in a hierarchical fashion, these are explained in
+ more detail below.
+ 
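For a concrete sense of the quota/period interface this documentation describes:
it is driven by plain file writes. Below is a minimal userspace sketch in C,
assuming a cgroup-v1 cpu controller mounted at /sys/fs/cgroup/cpu and a
pre-created group named "demo" (both are assumptions, not part of the patch).

	/* Cap the "demo" group at half a CPU: 50ms of quota per 100ms period. */
	#include <stdio.h>

	static int write_val(const char *path, long val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fprintf(f, "%ld\n", val);
		return fclose(f);
	}

	int main(void)
	{
		/* Paths assume a cgroup-v1 layout; adjust for your mount point. */
		if (write_val("/sys/fs/cgroup/cpu/demo/cpu.cfs_period_us", 100000))
			return 1;
		if (write_val("/sys/fs/cgroup/cpu/demo/cpu.cfs_quota_us", 50000))
			return 1;
		return 0;
	}

Writing -1 to cpu.cfs_quota_us instead would lift the limit again, per the text
above.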
+@@ -53,8 +54,8 @@ unthrottled if it is in a constrained state.
+ System wide settings
+ --------------------
+ For efficiency run-time is transferred between the global pool and CPU local
+-"silos" in a batch fashion.  This greatly reduces global accounting pressure
+-on large systems.  The amount transferred each time such an update is required
++"silos" in a batch fashion. This greatly reduces global accounting pressure
++on large systems. The amount transferred each time such an update is required
+ is described as the "slice".
+ 
+ This is tunable via procfs::
+@@ -97,6 +98,51 @@ There are two ways in which a group may become throttled:
+ In case b) above, even though the child may have runtime remaining it will not
+ be allowed to until the parent's runtime is refreshed.
+ 
++CFS Bandwidth Quota Caveats
++---------------------------
++Once a slice is assigned to a cpu it does not expire. However all but 1ms of
++the slice may be returned to the global pool if all threads on that cpu become
++unrunnable. This is configured at compile time by the min_cfs_rq_runtime
++variable. This is a performance tweak that helps prevent added contention on
++the global lock.
++
++The fact that cpu-local slices do not expire results in some interesting corner
++cases that should be understood.
++
++For cgroup cpu constrained applications that are cpu limited this is a
++relatively moot point because they will naturally consume the entirety of their
++quota as well as the entirety of each cpu-local slice in each period. As a
++result it is expected that nr_periods roughly equal nr_throttled, and that
++cpuacct.usage will increase roughly equal to cfs_quota_us in each period.
++
++For highly-threaded, non-cpu bound applications this non-expiration nuance
++allows applications to briefly burst past their quota limits by the amount of
++unused slice on each cpu that the task group is running on (typically at most
++1ms per cpu or as defined by min_cfs_rq_runtime). This slight burst only
++applies if quota had been assigned to a cpu and then not fully used or returned
++in previous periods. This burst amount will not be transferred between cores.
++As a result, this mechanism still strictly limits the task group to quota
++average usage, albeit over a longer time window than a single period. This
++also limits the burst ability to no more than 1ms per cpu. This provides
++a better, more predictable user experience for highly threaded applications
++with small quota limits on high core count machines. It also eliminates the
++propensity to throttle these applications while simultaneously using less than
++quota amounts of cpu. Another way to say this is that by allowing the unused
++portion of a slice to remain valid across periods we have decreased the
++possibility of wastefully expiring quota on cpu-local silos that don't need a
++full slice's amount of cpu time.
++
++The interaction between cpu-bound and non-cpu-bound-interactive applications
++should also be considered, especially when single core usage hits 100%. If you
++gave each of these applications half of a cpu-core and they both got scheduled
++on the same CPU it is theoretically possible that the non-cpu bound application
++will use up to 1ms additional quota in some periods, thereby preventing the
++cpu-bound application from fully using its quota by that same amount. In these
++instances it will be up to the CFS algorithm (see sched-design-CFS.rst) to
++decide which application is chosen to run, as they will both be runnable and
++have remaining quota.
This runtime discrepancy will be made up in the following ++periods when the interactive application idles. ++ + Examples + -------- + 1. Limit a group to 1 CPU worth of runtime:: +diff --git a/Makefile b/Makefile +index 445f9488d8ba..ad5f5230bbbe 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 3 +-SUBLEVEL = 8 ++SUBLEVEL = 9 + EXTRAVERSION = + NAME = Bobtail Squid + +diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c +index 861a8aea51f9..661fd842ea97 100644 +--- a/arch/arc/kernel/perf_event.c ++++ b/arch/arc/kernel/perf_event.c +@@ -614,8 +614,8 @@ static int arc_pmu_device_probe(struct platform_device *pdev) + /* loop thru all available h/w condition indexes */ + for (i = 0; i < cc_bcr.c; i++) { + write_aux_reg(ARC_REG_CC_INDEX, i); +- cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0); +- cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1); ++ cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0)); ++ cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1)); + + arc_pmu_map_hw_event(i, cc_name.str); + arc_pmu_add_raw_event_attr(i, cc_name.str); +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index e8cf56283871..f63b824cdc2d 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -111,7 +111,7 @@ config ARM64 + select GENERIC_STRNLEN_USER + select GENERIC_TIME_VSYSCALL + select GENERIC_GETTIMEOFDAY +- select GENERIC_COMPAT_VDSO if (!CPU_BIG_ENDIAN && COMPAT) ++ select GENERIC_COMPAT_VDSO if (!CPU_BIG_ENDIAN && COMPAT && "$(CROSS_COMPILE_COMPAT)" != "") + select HANDLE_DOMAIN_IRQ + select HARDIRQS_SW_RESEND + select HAVE_PCI +diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile +index 61de992bbea3..5858d6e44926 100644 +--- a/arch/arm64/Makefile ++++ b/arch/arm64/Makefile +@@ -47,20 +47,16 @@ $(warning Detected assembler with broken .inst; disassembly will be unreliable) + endif + endif + ++ifeq ($(CONFIG_CC_IS_CLANG), y) ++COMPATCC ?= $(CC) --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%)) ++else ++COMPATCC ?= $(CROSS_COMPILE_COMPAT)gcc ++endif ++export COMPATCC ++ + ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y) +- CROSS_COMPILE_COMPAT ?= $(CONFIG_CROSS_COMPILE_COMPAT_VDSO:"%"=%) +- +- ifeq ($(CONFIG_CC_IS_CLANG), y) +- $(warning CROSS_COMPILE_COMPAT is clang, the compat vDSO will not be built) +- else ifeq ($(strip $(CROSS_COMPILE_COMPAT)),) +- $(warning CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built) +- else ifeq ($(shell which $(CROSS_COMPILE_COMPAT)gcc 2> /dev/null),) +- $(error $(CROSS_COMPILE_COMPAT)gcc not found, check CROSS_COMPILE_COMPAT) +- else +- export CROSS_COMPILE_COMPAT +- export CONFIG_COMPAT_VDSO := y +- compat_vdso := -DCONFIG_COMPAT_VDSO=1 +- endif ++ export CONFIG_COMPAT_VDSO := y ++ compat_vdso := -DCONFIG_COMPAT_VDSO=1 + endif + + KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) $(compat_vdso) +diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile +index 0a7e5dfce6f7..954d75de617b 100644 +--- a/arch/arm64/boot/dts/qcom/Makefile ++++ b/arch/arm64/boot/dts/qcom/Makefile +@@ -6,6 +6,9 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8916-mtp.dtb + dtb-$(CONFIG_ARCH_QCOM) += msm8992-bullhead-rev-101.dtb + dtb-$(CONFIG_ARCH_QCOM) += msm8994-angler-rev-101.dtb + dtb-$(CONFIG_ARCH_QCOM) += msm8996-mtp.dtb ++dtb-$(CONFIG_ARCH_QCOM) += msm8998-asus-novago-tp370ql.dtb ++dtb-$(CONFIG_ARCH_QCOM) += msm8998-hp-envy-x2.dtb ++dtb-$(CONFIG_ARCH_QCOM) += msm8998-lenovo-miix-630.dtb + 
dtb-$(CONFIG_ARCH_QCOM) += msm8998-mtp.dtb + dtb-$(CONFIG_ARCH_QCOM) += sdm845-cheza-r1.dtb + dtb-$(CONFIG_ARCH_QCOM) += sdm845-cheza-r2.dtb +diff --git a/arch/arm64/boot/dts/qcom/msm8998-asus-novago-tp370ql.dts b/arch/arm64/boot/dts/qcom/msm8998-asus-novago-tp370ql.dts +new file mode 100644 +index 000000000000..db5821be1e2f +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/msm8998-asus-novago-tp370ql.dts +@@ -0,0 +1,47 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (c) 2019, Jeffrey Hugo. All rights reserved. */ ++ ++/dts-v1/; ++ ++#include "msm8998-clamshell.dtsi" ++ ++/ { ++ model = "Asus NovaGo TP370QL"; ++ compatible = "asus,novago-tp370ql", "qcom,msm8998"; ++}; ++ ++&blsp1_i2c6 { ++ status = "okay"; ++ ++ touchpad@15 { ++ compatible = "hid-over-i2c"; ++ interrupt-parent = <&tlmm>; ++ interrupts = <0x7b IRQ_TYPE_LEVEL_LOW>; ++ reg = <0x15>; ++ hid-descr-addr = <0x0001>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&touchpad>; ++ }; ++ ++ keyboard@3a { ++ compatible = "hid-over-i2c"; ++ interrupt-parent = <&tlmm>; ++ interrupts = <0x25 IRQ_TYPE_LEVEL_LOW>; ++ reg = <0x3a>; ++ hid-descr-addr = <0x0001>; ++ }; ++}; ++ ++&sdhc2 { ++ cd-gpios = <&tlmm 95 GPIO_ACTIVE_HIGH>; ++}; ++ ++&tlmm { ++ touchpad: touchpad { ++ config { ++ pins = "gpio123"; ++ bias-pull-up; ++ }; ++ }; ++}; +diff --git a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi +new file mode 100644 +index 000000000000..9682d4dd7496 +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi +@@ -0,0 +1,240 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (c) 2019, Jeffrey Hugo. All rights reserved. */ ++ ++/* ++ * Common include for MSM8998 clamshell devices, ie the Lenovo Miix 630, ++ * Asus NovaGo TP370QL, and HP Envy x2. All three devices are basically the ++ * same, with differences in peripherals. 
++ */ ++ ++#include "msm8998.dtsi" ++#include "pm8998.dtsi" ++#include "pm8005.dtsi" ++ ++/ { ++ chosen { ++ }; ++ ++ vph_pwr: vph-pwr-regulator { ++ compatible = "regulator-fixed"; ++ regulator-name = "vph_pwr"; ++ regulator-always-on; ++ regulator-boot-on; ++ }; ++}; ++ ++&qusb2phy { ++ status = "okay"; ++ ++ vdda-pll-supply = <&vreg_l12a_1p8>; ++ vdda-phy-dpdm-supply = <&vreg_l24a_3p075>; ++}; ++ ++&rpm_requests { ++ pm8998-regulators { ++ compatible = "qcom,rpm-pm8998-regulators"; ++ ++ vdd_s1-supply = <&vph_pwr>; ++ vdd_s2-supply = <&vph_pwr>; ++ vdd_s3-supply = <&vph_pwr>; ++ vdd_s4-supply = <&vph_pwr>; ++ vdd_s5-supply = <&vph_pwr>; ++ vdd_s6-supply = <&vph_pwr>; ++ vdd_s7-supply = <&vph_pwr>; ++ vdd_s8-supply = <&vph_pwr>; ++ vdd_s9-supply = <&vph_pwr>; ++ vdd_s10-supply = <&vph_pwr>; ++ vdd_s11-supply = <&vph_pwr>; ++ vdd_s12-supply = <&vph_pwr>; ++ vdd_s13-supply = <&vph_pwr>; ++ vdd_l1_l27-supply = <&vreg_s7a_1p025>; ++ vdd_l2_l8_l17-supply = <&vreg_s3a_1p35>; ++ vdd_l3_l11-supply = <&vreg_s7a_1p025>; ++ vdd_l4_l5-supply = <&vreg_s7a_1p025>; ++ vdd_l6-supply = <&vreg_s5a_2p04>; ++ vdd_l7_l12_l14_l15-supply = <&vreg_s5a_2p04>; ++ vdd_l9-supply = <&vph_pwr>; ++ vdd_l10_l23_l25-supply = <&vph_pwr>; ++ vdd_l13_l19_l21-supply = <&vph_pwr>; ++ vdd_l16_l28-supply = <&vph_pwr>; ++ vdd_l18_l22-supply = <&vph_pwr>; ++ vdd_l20_l24-supply = <&vph_pwr>; ++ vdd_l26-supply = <&vreg_s3a_1p35>; ++ vdd_lvs1_lvs2-supply = <&vreg_s4a_1p8>; ++ ++ vreg_s3a_1p35: s3 { ++ regulator-min-microvolt = <1352000>; ++ regulator-max-microvolt = <1352000>; ++ }; ++ vreg_s4a_1p8: s4 { ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <1800000>; ++ regulator-allow-set-load; ++ }; ++ vreg_s5a_2p04: s5 { ++ regulator-min-microvolt = <1904000>; ++ regulator-max-microvolt = <2040000>; ++ }; ++ vreg_s7a_1p025: s7 { ++ regulator-min-microvolt = <900000>; ++ regulator-max-microvolt = <1028000>; ++ }; ++ vreg_l1a_0p875: l1 { ++ regulator-min-microvolt = <880000>; ++ regulator-max-microvolt = <880000>; ++ regulator-allow-set-load; ++ }; ++ vreg_l2a_1p2: l2 { ++ regulator-min-microvolt = <1200000>; ++ regulator-max-microvolt = <1200000>; ++ regulator-allow-set-load; ++ }; ++ vreg_l3a_1p0: l3 { ++ regulator-min-microvolt = <1000000>; ++ regulator-max-microvolt = <1000000>; ++ }; ++ vreg_l5a_0p8: l5 { ++ regulator-min-microvolt = <800000>; ++ regulator-max-microvolt = <800000>; ++ }; ++ vreg_l6a_1p8: l6 { ++ regulator-min-microvolt = <1808000>; ++ regulator-max-microvolt = <1808000>; ++ }; ++ vreg_l7a_1p8: l7 { ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <1800000>; ++ }; ++ vreg_l8a_1p2: l8 { ++ regulator-min-microvolt = <1200000>; ++ regulator-max-microvolt = <1200000>; ++ }; ++ vreg_l9a_1p8: l9 { ++ regulator-min-microvolt = <1808000>; ++ regulator-max-microvolt = <2960000>; ++ }; ++ vreg_l10a_1p8: l10 { ++ regulator-min-microvolt = <1808000>; ++ regulator-max-microvolt = <2960000>; ++ }; ++ vreg_l11a_1p0: l11 { ++ regulator-min-microvolt = <1000000>; ++ regulator-max-microvolt = <1000000>; ++ }; ++ vreg_l12a_1p8: l12 { ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <1800000>; ++ }; ++ vreg_l13a_2p95: l13 { ++ regulator-min-microvolt = <1808000>; ++ regulator-max-microvolt = <2960000>; ++ }; ++ vreg_l14a_1p88: l14 { ++ regulator-min-microvolt = <1880000>; ++ regulator-max-microvolt = <1880000>; ++ }; ++ vreg_15a_1p8: l15 { ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <1800000>; ++ }; ++ vreg_l16a_2p7: l16 { ++ regulator-min-microvolt 
= <2704000>; ++ regulator-max-microvolt = <2704000>; ++ }; ++ vreg_l17a_1p3: l17 { ++ regulator-min-microvolt = <1304000>; ++ regulator-max-microvolt = <1304000>; ++ }; ++ vreg_l18a_2p7: l18 { ++ regulator-min-microvolt = <2704000>; ++ regulator-max-microvolt = <2704000>; ++ }; ++ vreg_l19a_3p0: l19 { ++ regulator-min-microvolt = <3008000>; ++ regulator-max-microvolt = <3008000>; ++ }; ++ vreg_l20a_2p95: l20 { ++ regulator-min-microvolt = <2960000>; ++ regulator-max-microvolt = <2960000>; ++ regulator-allow-set-load; ++ }; ++ vreg_l21a_2p95: l21 { ++ regulator-min-microvolt = <2960000>; ++ regulator-max-microvolt = <2960000>; ++ regulator-allow-set-load; ++ regulator-system-load = <800000>; ++ }; ++ vreg_l22a_2p85: l22 { ++ regulator-min-microvolt = <2864000>; ++ regulator-max-microvolt = <2864000>; ++ }; ++ vreg_l23a_3p3: l23 { ++ regulator-min-microvolt = <3312000>; ++ regulator-max-microvolt = <3312000>; ++ }; ++ vreg_l24a_3p075: l24 { ++ regulator-min-microvolt = <3088000>; ++ regulator-max-microvolt = <3088000>; ++ }; ++ vreg_l25a_3p3: l25 { ++ regulator-min-microvolt = <3104000>; ++ regulator-max-microvolt = <3312000>; ++ }; ++ vreg_l26a_1p2: l26 { ++ regulator-min-microvolt = <1200000>; ++ regulator-max-microvolt = <1200000>; ++ }; ++ vreg_l28_3p0: l28 { ++ regulator-min-microvolt = <3008000>; ++ regulator-max-microvolt = <3008000>; ++ }; ++ ++ vreg_lvs1a_1p8: lvs1 { ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <1800000>; ++ }; ++ ++ vreg_lvs2a_1p8: lvs2 { ++ regulator-min-microvolt = <1800000>; ++ regulator-max-microvolt = <1800000>; ++ }; ++ ++ }; ++}; ++ ++&tlmm { ++ gpio-reserved-ranges = <0 4>, <81 4>; ++ ++ touchpad: touchpad { ++ config { ++ pins = "gpio123"; ++ bias-pull-up; /* pull up */ ++ }; ++ }; ++}; ++ ++&sdhc2 { ++ status = "okay"; ++ ++ vmmc-supply = <&vreg_l21a_2p95>; ++ vqmmc-supply = <&vreg_l13a_2p95>; ++ ++ pinctrl-names = "default", "sleep"; ++ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>; ++ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>; ++}; ++ ++&usb3 { ++ status = "okay"; ++}; ++ ++&usb3_dwc3 { ++ dr_mode = "host"; /* Force to host until we have Type-C hooked up */ ++}; ++ ++&usb3phy { ++ status = "okay"; ++ ++ vdda-phy-supply = <&vreg_l1a_0p875>; ++ vdda-pll-supply = <&vreg_l2a_1p2>; ++}; +diff --git a/arch/arm64/boot/dts/qcom/msm8998-hp-envy-x2.dts b/arch/arm64/boot/dts/qcom/msm8998-hp-envy-x2.dts +new file mode 100644 +index 000000000000..24073127091f +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/msm8998-hp-envy-x2.dts +@@ -0,0 +1,30 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (c) 2019, Jeffrey Hugo. All rights reserved. */ ++ ++/dts-v1/; ++ ++#include "msm8998-clamshell.dtsi" ++ ++/ { ++ model = "HP Envy x2"; ++ compatible = "hp,envy-x2", "qcom,msm8998"; ++}; ++ ++&blsp1_i2c6 { ++ status = "okay"; ++ ++ keyboard@3a { ++ compatible = "hid-over-i2c"; ++ interrupt-parent = <&tlmm>; ++ interrupts = <0x79 IRQ_TYPE_LEVEL_LOW>; ++ reg = <0x3a>; ++ hid-descr-addr = <0x0001>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&touchpad>; ++ }; ++}; ++ ++&sdhc2 { ++ cd-gpios = <&tlmm 95 GPIO_ACTIVE_LOW>; ++}; +diff --git a/arch/arm64/boot/dts/qcom/msm8998-lenovo-miix-630.dts b/arch/arm64/boot/dts/qcom/msm8998-lenovo-miix-630.dts +new file mode 100644 +index 000000000000..407c6a32911c +--- /dev/null ++++ b/arch/arm64/boot/dts/qcom/msm8998-lenovo-miix-630.dts +@@ -0,0 +1,30 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* Copyright (c) 2019, Jeffrey Hugo. All rights reserved. 
*/ ++ ++/dts-v1/; ++ ++#include "msm8998-clamshell.dtsi" ++ ++/ { ++ model = "Lenovo Miix 630"; ++ compatible = "lenovo,miix-630", "qcom,msm8998"; ++}; ++ ++&blsp1_i2c6 { ++ status = "okay"; ++ ++ keyboard@3a { ++ compatible = "hid-over-i2c"; ++ interrupt-parent = <&tlmm>; ++ interrupts = <0x79 IRQ_TYPE_LEVEL_LOW>; ++ reg = <0x3a>; ++ hid-descr-addr = <0x0001>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&touchpad>; ++ }; ++}; ++ ++&sdhc2 { ++ cd-gpios = <&tlmm 95 GPIO_ACTIVE_HIGH>; ++}; +diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h +index 92d2e9f28f28..a7edc079bcfd 100644 +--- a/arch/arm64/include/asm/pgtable-prot.h ++++ b/arch/arm64/include/asm/pgtable-prot.h +@@ -32,11 +32,11 @@ + #define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) + #define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) + +-#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) +-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) +-#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC)) +-#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT)) +-#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL)) ++#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) ++#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) ++#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC)) ++#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT)) ++#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL)) + + #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) + #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) +@@ -80,8 +80,9 @@ + #define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(DEVICE_nGnRE) | PTE_S2_RDONLY | PAGE_S2_XN) + + #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) +-#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) +-#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE) ++/* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */ ++#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) ++#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE) + #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) + #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN) + #define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN) +diff --git a/arch/arm64/include/asm/vdso/compat_barrier.h b/arch/arm64/include/asm/vdso/compat_barrier.h +index fb60a88b5ed4..3fd8fd6d8fc2 100644 +--- a/arch/arm64/include/asm/vdso/compat_barrier.h ++++ b/arch/arm64/include/asm/vdso/compat_barrier.h +@@ -20,7 +20,7 @@ + + #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory") + +-#if 
__LINUX_ARM_ARCH__ >= 8 ++#if __LINUX_ARM_ARCH__ >= 8 && defined(CONFIG_AS_DMB_ISHLD) + #define aarch32_smp_mb() dmb(ish) + #define aarch32_smp_rmb() dmb(ishld) + #define aarch32_smp_wmb() dmb(ishst) +diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c +index 2ec09debc2bb..ca158be21f83 100644 +--- a/arch/arm64/kernel/armv8_deprecated.c ++++ b/arch/arm64/kernel/armv8_deprecated.c +@@ -174,6 +174,9 @@ static void __init register_insn_emulation(struct insn_emulation_ops *ops) + struct insn_emulation *insn; + + insn = kzalloc(sizeof(*insn), GFP_KERNEL); ++ if (!insn) ++ return; ++ + insn->ops = ops; + insn->min = INSN_UNDEF; + +@@ -233,6 +236,8 @@ static void __init register_insn_emulation_sysctl(void) + + insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl), + GFP_KERNEL); ++ if (!insns_sysctl) ++ return; + + raw_spin_lock_irqsave(&insn_emulation_lock, flags); + list_for_each_entry(insn, &insn_emulation, node) { +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c +index 27b4a973f16d..1e0b9ae9bf7e 100644 +--- a/arch/arm64/kernel/cpu_errata.c ++++ b/arch/arm64/kernel/cpu_errata.c +@@ -816,6 +816,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = { + { + .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003", + .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, ++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, + .matches = cpucap_multi_entry_cap_matches, + .match_list = qcom_erratum_1003_list, + }, +diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c +index 9323bcc40a58..cabebf1a7976 100644 +--- a/arch/arm64/kernel/cpufeature.c ++++ b/arch/arm64/kernel/cpufeature.c +@@ -136,6 +136,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = { + + static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0), ++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), + FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0), + ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH), +diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S +index 109894bd3194..239f6841a741 100644 +--- a/arch/arm64/kernel/entry.S ++++ b/arch/arm64/kernel/entry.S +@@ -775,6 +775,7 @@ el0_sync_compat: + b.ge el0_dbg + b el0_inv + el0_svc_compat: ++ gic_prio_kentry_setup tmp=x1 + mov x0, sp + bl el0_svc_compat_handler + b ret_to_user +diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c +index 171773257974..06e56b470315 100644 +--- a/arch/arm64/kernel/ftrace.c ++++ b/arch/arm64/kernel/ftrace.c +@@ -121,10 +121,16 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) + + /* + * Ensure updated trampoline is visible to instruction +- * fetch before we patch in the branch. ++ * fetch before we patch in the branch. Although the ++ * architecture doesn't require an IPI in this case, ++ * Neoverse-N1 erratum #1542419 does require one ++ * if the TLB maintenance in module_enable_ro() is ++ * skipped due to rodata_enabled. It doesn't seem worth ++ * it to make it conditional given that this is ++ * certainly not a fast-path. 
+ */ +- __flush_icache_range((unsigned long)&dst[0], +- (unsigned long)&dst[1]); ++ flush_icache_range((unsigned long)&dst[0], ++ (unsigned long)&dst[1]); + } + addr = (unsigned long)dst; + #else /* CONFIG_ARM64_MODULE_PLTS */ +diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile +index 1fba0776ed40..aa171b043287 100644 +--- a/arch/arm64/kernel/vdso32/Makefile ++++ b/arch/arm64/kernel/vdso32/Makefile +@@ -8,8 +8,6 @@ + ARCH_REL_TYPE_ABS := R_ARM_JUMP_SLOT|R_ARM_GLOB_DAT|R_ARM_ABS32 + include $(srctree)/lib/vdso/Makefile + +-COMPATCC := $(CROSS_COMPILE_COMPAT)gcc +- + # Same as cc-*option, but using COMPATCC instead of CC + cc32-option = $(call try-run,\ + $(COMPATCC) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2)) +@@ -17,6 +15,8 @@ cc32-disable-warning = $(call try-run,\ + $(COMPATCC) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) + cc32-ldoption = $(call try-run,\ + $(COMPATCC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2)) ++cc32-as-instr = $(call try-run,\ ++ printf "%b\n" "$(1)" | $(COMPATCC) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3)) + + # We cannot use the global flags to compile the vDSO files, the main reason + # being that the 32-bit compiler may be older than the main (64-bit) compiler +@@ -25,11 +25,9 @@ cc32-ldoption = $(call try-run,\ + # arm64 one. + # As a result we set our own flags here. + +-# From top-level Makefile +-# NOSTDINC_FLAGS +-VDSO_CPPFLAGS := -nostdinc -isystem $(shell $(COMPATCC) -print-file-name=include) ++# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile ++VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(COMPATCC) -print-file-name=include) + VDSO_CPPFLAGS += $(LINUXINCLUDE) +-VDSO_CPPFLAGS += $(KBUILD_CPPFLAGS) + + # Common C and assembly flags + # From top-level Makefile +@@ -55,6 +53,7 @@ endif + VDSO_CAFLAGS += -fPIC -fno-builtin -fno-stack-protector + VDSO_CAFLAGS += -DDISABLE_BRANCH_PROFILING + ++ + # Try to compile for ARMv8. If the compiler is too old and doesn't support it, + # fall back to v7. There is no easy way to check for what architecture the code + # is being compiled, so define a macro specifying that (see arch/arm/Makefile). +@@ -91,6 +90,12 @@ VDSO_CFLAGS += -Wno-int-to-pointer-cast + VDSO_AFLAGS := $(VDSO_CAFLAGS) + VDSO_AFLAGS += -D__ASSEMBLY__ + ++# Check for binutils support for dmb ishld ++dmbinstr := $(call cc32-as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1) ++ ++VDSO_CFLAGS += $(dmbinstr) ++VDSO_AFLAGS += $(dmbinstr) ++ + VDSO_LDFLAGS := $(VDSO_CPPFLAGS) + # From arm vDSO Makefile + VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 +diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c +index 8772617b64ce..80112f2298b6 100644 +--- a/arch/mips/fw/sni/sniprom.c ++++ b/arch/mips/fw/sni/sniprom.c +@@ -43,7 +43,7 @@ + + /* O32 stack has to be 8-byte aligned. 
*/ + static u64 o32_stk[4096]; +-#define O32_STK &o32_stk[sizeof(o32_stk)] ++#define O32_STK (&o32_stk[ARRAY_SIZE(o32_stk)]) + + #define __PROM_O32(fun, arg) fun arg __asm__(#fun); \ + __asm__(#fun " = call_o32") +diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h +index c8a47d18f628..2b61052e10c9 100644 +--- a/arch/mips/include/asm/cmpxchg.h ++++ b/arch/mips/include/asm/cmpxchg.h +@@ -77,8 +77,8 @@ extern unsigned long __xchg_called_with_bad_pointer(void) + extern unsigned long __xchg_small(volatile void *ptr, unsigned long val, + unsigned int size); + +-static inline unsigned long __xchg(volatile void *ptr, unsigned long x, +- int size) ++static __always_inline ++unsigned long __xchg(volatile void *ptr, unsigned long x, int size) + { + switch (size) { + case 1: +@@ -153,8 +153,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x, + extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old, + unsigned long new, unsigned int size); + +-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, +- unsigned long new, unsigned int size) ++static __always_inline ++unsigned long __cmpxchg(volatile void *ptr, unsigned long old, ++ unsigned long new, unsigned int size) + { + switch (size) { + case 1: +diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c +index 94cd96b9b7bb..001dfac8354a 100644 +--- a/arch/powerpc/platforms/powernv/smp.c ++++ b/arch/powerpc/platforms/powernv/smp.c +@@ -146,20 +146,25 @@ static int pnv_smp_cpu_disable(void) + return 0; + } + ++static void pnv_flush_interrupts(void) ++{ ++ if (cpu_has_feature(CPU_FTR_ARCH_300)) { ++ if (xive_enabled()) ++ xive_flush_interrupt(); ++ else ++ icp_opal_flush_interrupt(); ++ } else { ++ icp_native_flush_interrupt(); ++ } ++} ++ + static void pnv_smp_cpu_kill_self(void) + { ++ unsigned long srr1, unexpected_mask, wmask; + unsigned int cpu; +- unsigned long srr1, wmask; + u64 lpcr_val; + + /* Standard hot unplug procedure */ +- /* +- * This hard disables local interurpts, ensuring we have no lazy +- * irqs pending. +- */ +- WARN_ON(irqs_disabled()); +- hard_irq_disable(); +- WARN_ON(lazy_irq_pending()); + + idle_task_exit(); + current->active_mm = NULL; /* for sanity */ +@@ -172,6 +177,27 @@ static void pnv_smp_cpu_kill_self(void) + if (cpu_has_feature(CPU_FTR_ARCH_207S)) + wmask = SRR1_WAKEMASK_P8; + ++ /* ++ * This turns the irq soft-disabled state we're called with, into a ++ * hard-disabled state with pending irq_happened interrupts cleared. ++ * ++ * PACA_IRQ_DEC - Decrementer should be ignored. ++ * PACA_IRQ_HMI - Can be ignored, processing is done in real mode. ++ * PACA_IRQ_DBELL, EE, PMI - Unexpected. ++ */ ++ hard_irq_disable(); ++ if (generic_check_cpu_restart(cpu)) ++ goto out; ++ ++ unexpected_mask = ~(PACA_IRQ_DEC | PACA_IRQ_HMI | PACA_IRQ_HARD_DIS); ++ if (local_paca->irq_happened & unexpected_mask) { ++ if (local_paca->irq_happened & PACA_IRQ_EE) ++ pnv_flush_interrupts(); ++ DBG("CPU%d Unexpected exit while offline irq_happened=%lx!\n", ++ cpu, local_paca->irq_happened); ++ } ++ local_paca->irq_happened = PACA_IRQ_HARD_DIS; ++ + /* + * We don't want to take decrementer interrupts while we are + * offline, so clear LPCR:PECE1. 
We keep PECE2 (and +@@ -197,6 +223,7 @@ static void pnv_smp_cpu_kill_self(void) + + srr1 = pnv_cpu_offline(cpu); + ++ WARN_ON_ONCE(!irqs_disabled()); + WARN_ON(lazy_irq_pending()); + + /* +@@ -212,13 +239,7 @@ static void pnv_smp_cpu_kill_self(void) + */ + if (((srr1 & wmask) == SRR1_WAKEEE) || + ((srr1 & wmask) == SRR1_WAKEHVI)) { +- if (cpu_has_feature(CPU_FTR_ARCH_300)) { +- if (xive_enabled()) +- xive_flush_interrupt(); +- else +- icp_opal_flush_interrupt(); +- } else +- icp_native_flush_interrupt(); ++ pnv_flush_interrupts(); + } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) { + unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER); + asm volatile(PPC_MSGCLR(%0) : : "r" (msg)); +@@ -266,7 +287,7 @@ static void pnv_smp_cpu_kill_self(void) + */ + lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1; + pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val); +- ++out: + DBG("CPU%d coming online...\n", cpu); + } + +diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c +index 424eb72d56b1..93742df9067f 100644 +--- a/arch/riscv/kernel/traps.c ++++ b/arch/riscv/kernel/traps.c +@@ -124,24 +124,24 @@ static inline unsigned long get_break_insn_length(unsigned long pc) + + asmlinkage void do_trap_break(struct pt_regs *regs) + { +-#ifdef CONFIG_GENERIC_BUG + if (!user_mode(regs)) { + enum bug_trap_type type; + + type = report_bug(regs->sepc, regs); + switch (type) { +- case BUG_TRAP_TYPE_NONE: +- break; ++#ifdef CONFIG_GENERIC_BUG + case BUG_TRAP_TYPE_WARN: + regs->sepc += get_break_insn_length(regs->sepc); +- break; ++ return; + case BUG_TRAP_TYPE_BUG: ++#endif /* CONFIG_GENERIC_BUG */ ++ default: + die(regs, "Kernel BUG"); + } ++ } else { ++ force_sig_fault(SIGTRAP, TRAP_BRKPT, ++ (void __user *)(regs->sepc)); + } +-#endif /* CONFIG_GENERIC_BUG */ +- +- force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)(regs->sepc)); + } + + #ifdef CONFIG_GENERIC_BUG +diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h +index bd2fd9a7821d..a470f1fa9f2a 100644 +--- a/arch/s390/include/asm/uaccess.h ++++ b/arch/s390/include/asm/uaccess.h +@@ -83,7 +83,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n); + __rc; \ + }) + +-static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) ++static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) + { + unsigned long spec = 0x010000UL; + int rc; +@@ -113,7 +113,7 @@ static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) + return rc; + } + +-static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) ++static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) + { + unsigned long spec = 0x01UL; + int rc; +diff --git a/arch/s390/include/asm/unwind.h b/arch/s390/include/asm/unwind.h +index d827b5b9a32c..eaaefeceef6f 100644 +--- a/arch/s390/include/asm/unwind.h ++++ b/arch/s390/include/asm/unwind.h +@@ -35,6 +35,7 @@ struct unwind_state { + struct task_struct *task; + struct pt_regs *regs; + unsigned long sp, ip; ++ bool reuse_sp; + int graph_idx; + bool reliable; + bool error; +diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c +index b9d8fe45737a..8f8456816d83 100644 +--- a/arch/s390/kernel/idle.c ++++ b/arch/s390/kernel/idle.c +@@ -69,18 +69,26 @@ DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); + static ssize_t show_idle_time(struct device *dev, + struct device_attribute *attr, char *buf) + { ++ unsigned long long now, idle_time, idle_enter, idle_exit, in_idle; + struct 
s390_idle_data *idle = &per_cpu(s390_idle, dev->id); +- unsigned long long now, idle_time, idle_enter, idle_exit; + unsigned int seq; + + do { +- now = get_tod_clock(); + seq = read_seqcount_begin(&idle->seqcount); + idle_time = READ_ONCE(idle->idle_time); + idle_enter = READ_ONCE(idle->clock_idle_enter); + idle_exit = READ_ONCE(idle->clock_idle_exit); + } while (read_seqcount_retry(&idle->seqcount, seq)); +- idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0; ++ in_idle = 0; ++ now = get_tod_clock(); ++ if (idle_enter) { ++ if (idle_exit) { ++ in_idle = idle_exit - idle_enter; ++ } else if (now > idle_enter) { ++ in_idle = now - idle_enter; ++ } ++ } ++ idle_time += in_idle; + return sprintf(buf, "%llu\n", idle_time >> 12); + } + DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); +@@ -88,17 +96,24 @@ DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); + u64 arch_cpu_idle_time(int cpu) + { + struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); +- unsigned long long now, idle_enter, idle_exit; ++ unsigned long long now, idle_enter, idle_exit, in_idle; + unsigned int seq; + + do { +- now = get_tod_clock(); + seq = read_seqcount_begin(&idle->seqcount); + idle_enter = READ_ONCE(idle->clock_idle_enter); + idle_exit = READ_ONCE(idle->clock_idle_exit); + } while (read_seqcount_retry(&idle->seqcount, seq)); +- +- return cputime_to_nsecs(idle_enter ? ((idle_exit ?: now) - idle_enter) : 0); ++ in_idle = 0; ++ now = get_tod_clock(); ++ if (idle_enter) { ++ if (idle_exit) { ++ in_idle = idle_exit - idle_enter; ++ } else if (now > idle_enter) { ++ in_idle = now - idle_enter; ++ } ++ } ++ return cputime_to_nsecs(in_idle); + } + + void arch_cpu_idle_enter(void) +diff --git a/arch/s390/kernel/unwind_bc.c b/arch/s390/kernel/unwind_bc.c +index 8fc9daae47a2..a8204f952315 100644 +--- a/arch/s390/kernel/unwind_bc.c ++++ b/arch/s390/kernel/unwind_bc.c +@@ -46,10 +46,15 @@ bool unwind_next_frame(struct unwind_state *state) + + regs = state->regs; + if (unlikely(regs)) { +- sp = READ_ONCE_NOCHECK(regs->gprs[15]); +- if (unlikely(outside_of_stack(state, sp))) { +- if (!update_stack_info(state, sp)) +- goto out_err; ++ if (state->reuse_sp) { ++ sp = state->sp; ++ state->reuse_sp = false; ++ } else { ++ sp = READ_ONCE_NOCHECK(regs->gprs[15]); ++ if (unlikely(outside_of_stack(state, sp))) { ++ if (!update_stack_info(state, sp)) ++ goto out_err; ++ } + } + sf = (struct stack_frame *) sp; + ip = READ_ONCE_NOCHECK(sf->gprs[8]); +@@ -107,9 +112,9 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, + { + struct stack_info *info = &state->stack_info; + unsigned long *mask = &state->stack_mask; ++ bool reliable, reuse_sp; + struct stack_frame *sf; + unsigned long ip; +- bool reliable; + + memset(state, 0, sizeof(*state)); + state->task = task; +@@ -134,10 +139,12 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, + if (regs) { + ip = READ_ONCE_NOCHECK(regs->psw.addr); + reliable = true; ++ reuse_sp = true; + } else { + sf = (struct stack_frame *) sp; + ip = READ_ONCE_NOCHECK(sf->gprs[8]); + reliable = false; ++ reuse_sp = false; + } + + #ifdef CONFIG_FUNCTION_GRAPH_TRACER +@@ -151,5 +158,6 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task, + state->sp = sp; + state->ip = ip; + state->reliable = reliable; ++ state->reuse_sp = reuse_sp; + } + EXPORT_SYMBOL_GPL(__unwind_start); +diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c +index 510a18299196..a51c892f14f3 100644 +--- a/arch/s390/mm/cmm.c ++++ b/arch/s390/mm/cmm.c +@@ 
-298,16 +298,16 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write, + } + + if (write) { +- len = *lenp; +- if (copy_from_user(buf, buffer, +- len > sizeof(buf) ? sizeof(buf) : len)) ++ len = min(*lenp, sizeof(buf)); ++ if (copy_from_user(buf, buffer, len)) + return -EFAULT; +- buf[sizeof(buf) - 1] = '\0'; ++ buf[len - 1] = '\0'; + cmm_skip_blanks(buf, &p); + nr = simple_strtoul(p, &p, 0); + cmm_skip_blanks(p, &p); + seconds = simple_strtoul(p, &p, 0); + cmm_set_timeout(nr, seconds); ++ *ppos += *lenp; + } else { + len = sprintf(buf, "%ld %ld\n", + cmm_timeout_pages, cmm_timeout_seconds); +@@ -315,9 +315,9 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write, + len = *lenp; + if (copy_to_user(buffer, buf, len)) + return -EFAULT; ++ *lenp = len; ++ *ppos += len; + } +- *lenp = len; +- *ppos += len; + return 0; + } + +diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c +index d80616ae8dd8..fbe97ab2e228 100644 +--- a/arch/s390/pci/pci_irq.c ++++ b/arch/s390/pci/pci_irq.c +@@ -284,7 +284,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) + return rc; + irq_set_chip_and_handler(irq, &zpci_irq_chip, + handle_percpu_irq); +- msg.data = hwirq; ++ msg.data = hwirq - bit; + if (irq_delivery == DIRECTED) { + msg.address_lo = zdev->msi_addr & 0xff0000ff; + msg.address_lo |= msi->affinity ? +diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c +index 33c1cd6a12ac..40ab9ad7aa96 100644 +--- a/arch/um/drivers/ubd_kern.c ++++ b/arch/um/drivers/ubd_kern.c +@@ -1403,8 +1403,12 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, + + spin_unlock_irq(&ubd_dev->lock); + +- if (ret < 0) +- blk_mq_requeue_request(req, true); ++ if (ret < 0) { ++ if (ret == -ENOMEM) ++ res = BLK_STS_RESOURCE; ++ else ++ res = BLK_STS_DEV_RESOURCE; ++ } + + return res; + } +diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c +index e7d35f60d53f..64c3e70b0556 100644 +--- a/arch/x86/events/amd/core.c ++++ b/arch/x86/events/amd/core.c +@@ -5,12 +5,14 @@ + #include + #include + #include ++#include + #include + #include + + #include "../perf_event.h" + +-static DEFINE_PER_CPU(unsigned int, perf_nmi_counter); ++static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp); ++static unsigned long perf_nmi_window; + + static __initconst const u64 amd_hw_cache_event_ids + [PERF_COUNT_HW_CACHE_MAX] +@@ -641,11 +643,12 @@ static void amd_pmu_disable_event(struct perf_event *event) + * handler when multiple PMCs are active or PMC overflow while handling some + * other source of an NMI. + * +- * Attempt to mitigate this by using the number of active PMCs to determine +- * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset +- * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the +- * number of active PMCs or 2. The value of 2 is used in case an NMI does not +- * arrive at the LAPIC in time to be collapsed into an already pending NMI. ++ * Attempt to mitigate this by creating an NMI window in which un-handled NMIs ++ * received during this window will be claimed. This prevents extending the ++ * window past when it is possible that latent NMIs should be received. The ++ * per-CPU perf_nmi_tstamp will be set to the window end time whenever perf has ++ * handled a counter. When an un-handled NMI is received, it will be claimed ++ * only if arriving within that window. 
+ */
+ static int amd_pmu_handle_irq(struct pt_regs *regs)
+ {
+@@ -663,21 +666,19 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
+ 	handled = x86_pmu_handle_irq(regs);
+ 
+ 	/*
+-	 * If a counter was handled, record the number of possible remaining
+-	 * NMIs that can occur.
++	 * If a counter was handled, record a timestamp such that un-handled
++	 * NMIs will be claimed if arriving within that window.
+ 	 */
+ 	if (handled) {
+-		this_cpu_write(perf_nmi_counter,
+-			       min_t(unsigned int, 2, active));
++		this_cpu_write(perf_nmi_tstamp,
++			       jiffies + perf_nmi_window);
+ 
+ 		return handled;
+ 	}
+ 
+-	if (!this_cpu_read(perf_nmi_counter))
++	if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
+ 		return NMI_DONE;
+ 
+-	this_cpu_dec(perf_nmi_counter);
+-
+ 	return NMI_HANDLED;
+ }
+ 
+@@ -909,6 +910,9 @@ static int __init amd_core_pmu_init(void)
+ 	if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
+ 		return 0;
+ 
++	/* Avoid calculating the value each time in the NMI handler */
++	perf_nmi_window = msecs_to_jiffies(100);
++
+ 	switch (boot_cpu_data.x86) {
+ 	case 0x15:
+ 		pr_cont("Fam15h ");
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index 9ae1c0f05fd2..3525014c71da 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -76,6 +76,9 @@
+ #define INTEL_FAM6_TIGERLAKE_L		0x8C
+ #define INTEL_FAM6_TIGERLAKE		0x8D
+ 
++#define INTEL_FAM6_COMETLAKE		0xA5
++#define INTEL_FAM6_COMETLAKE_L		0xA6
++
+ /* "Small Core" Processors (Atom) */
+ 
+ #define INTEL_FAM6_ATOM_BONNELL		0x1C /* Diamondville, Pineview */
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 45e425c5e6f5..fe887f723708 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -736,8 +736,14 @@ static int get_npt_level(struct kvm_vcpu *vcpu)
+ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+ {
+ 	vcpu->arch.efer = efer;
+-	if (!npt_enabled && !(efer & EFER_LMA))
+-		efer &= ~EFER_LME;
++
++	if (!npt_enabled) {
++		/* Shadow paging assumes NX to be available. */
++		efer |= EFER_NX;
++
++		if (!(efer & EFER_LMA))
++			efer &= ~EFER_LME;
++	}
+ 
+ 	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
+ 	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 1d11bf4bab8b..2a0e281542cc 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -897,17 +897,9 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
+ 	u64 guest_efer = vmx->vcpu.arch.efer;
+ 	u64 ignore_bits = 0;
+ 
+-	if (!enable_ept) {
+-		/*
+-		 * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
+-		 * host CPUID is more efficient than testing guest CPUID
+-		 * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
+-		 */
+-		if (boot_cpu_has(X86_FEATURE_SMEP))
+-			guest_efer |= EFER_NX;
+-		else if (!(guest_efer & EFER_NX))
+-			ignore_bits |= EFER_NX;
+-	}
++	/* Shadow paging assumes NX to be available. */
++	if (!enable_ept)
++		guest_efer |= EFER_NX;
+ 
+ 	/*
+ 	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
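The NMI-window logic in the arch/x86/events/amd/core.c hunk above reduces to a
per-CPU deadline check against jiffies. A compressed sketch of the same pattern
follows; names are hypothetical, and handle_pmc_overflows() stands in for the
real x86_pmu_handle_irq().

	#include <linux/jiffies.h>
	#include <linux/nmi.h>
	#include <linux/percpu.h>
	#include <linux/ptrace.h>

	static DEFINE_PER_CPU(unsigned long, nmi_deadline);
	static unsigned long nmi_window;	/* msecs_to_jiffies(100) at init */

	/* Hypothetical stand-in for x86_pmu_handle_irq(). */
	extern int handle_pmc_overflows(struct pt_regs *regs);

	static int window_nmi_handler(struct pt_regs *regs)
	{
		int handled = handle_pmc_overflows(regs);

		if (handled) {
			/* Open a window in which latent NMIs will be claimed. */
			this_cpu_write(nmi_deadline, jiffies + nmi_window);
			return handled;
		}

		/* Claim an otherwise-unhandled NMI only inside the window. */
		if (time_after(jiffies, this_cpu_read(nmi_deadline)))
			return NMI_DONE;

		return NMI_HANDLED;
	}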
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c +index a7189a3b4d70..3304f61538a2 100644 +--- a/arch/x86/platform/efi/efi.c ++++ b/arch/x86/platform/efi/efi.c +@@ -894,9 +894,6 @@ static void __init kexec_enter_virtual_mode(void) + + if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX)) + runtime_code_page_mkexec(); +- +- /* clean DUMMY object */ +- efi_delete_dummy_variable(); + #endif + } + +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c +index 750f46ad018a..205b1176084f 100644 +--- a/arch/x86/xen/enlighten.c ++++ b/arch/x86/xen/enlighten.c +@@ -269,19 +269,41 @@ void xen_reboot(int reason) + BUG(); + } + ++static int reboot_reason = SHUTDOWN_reboot; ++static bool xen_legacy_crash; + void xen_emergency_restart(void) + { +- xen_reboot(SHUTDOWN_reboot); ++ xen_reboot(reboot_reason); + } + + static int + xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) + { +- if (!kexec_crash_loaded()) +- xen_reboot(SHUTDOWN_crash); ++ if (!kexec_crash_loaded()) { ++ if (xen_legacy_crash) ++ xen_reboot(SHUTDOWN_crash); ++ ++ reboot_reason = SHUTDOWN_crash; ++ ++ /* ++ * If panic_timeout==0 then we are supposed to wait forever. ++ * However, to preserve original dom0 behavior we have to drop ++ * into hypervisor. (domU behavior is controlled by its ++ * config file) ++ */ ++ if (panic_timeout == 0) ++ panic_timeout = -1; ++ } + return NOTIFY_DONE; + } + ++static int __init parse_xen_legacy_crash(char *arg) ++{ ++ xen_legacy_crash = true; ++ return 0; ++} ++early_param("xen_legacy_crash", parse_xen_legacy_crash); ++ + static struct notifier_block xen_panic_block = { + .notifier_call = xen_panic_event, + .priority = INT_MIN +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c +index 0b727f7432f9..9650777d0aaf 100644 +--- a/drivers/block/nbd.c ++++ b/drivers/block/nbd.c +@@ -230,8 +230,8 @@ static void nbd_put(struct nbd_device *nbd) + if (refcount_dec_and_mutex_lock(&nbd->refs, + &nbd_index_mutex)) { + idr_remove(&nbd_index_idr, nbd->index); +- mutex_unlock(&nbd_index_mutex); + nbd_dev_remove(nbd); ++ mutex_unlock(&nbd_index_mutex); + } + } + +@@ -935,6 +935,25 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, + return ret; + } + ++static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd, ++ int *err) ++{ ++ struct socket *sock; ++ ++ *err = 0; ++ sock = sockfd_lookup(fd, err); ++ if (!sock) ++ return NULL; ++ ++ if (sock->ops->shutdown == sock_no_shutdown) { ++ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n"); ++ *err = -EINVAL; ++ return NULL; ++ } ++ ++ return sock; ++} ++ + static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, + bool netlink) + { +@@ -944,7 +963,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, + struct nbd_sock *nsock; + int err; + +- sock = sockfd_lookup(arg, &err); ++ sock = nbd_get_socket(nbd, arg, &err); + if (!sock) + return err; + +@@ -996,7 +1015,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) + int i; + int err; + +- sock = sockfd_lookup(arg, &err); ++ sock = nbd_get_socket(nbd, arg, &err); + if (!sock) + return err; + +diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c +index a01f4b5d793c..be9ef4dd756f 100644 +--- a/drivers/dma/imx-sdma.c ++++ b/drivers/dma/imx-sdma.c +@@ -1707,6 +1707,14 @@ static void sdma_add_scripts(struct sdma_engine *sdma, + if (!sdma->script_number) + sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; + ++ 
if (sdma->script_number > sizeof(struct sdma_script_start_addrs) ++ / sizeof(s32)) { ++ dev_err(sdma->dev, ++ "SDMA script number %d not match with firmware.\n", ++ sdma->script_number); ++ return; ++ } ++ + for (i = 0; i < sdma->script_number; i++) + if (addr_arr[i] > 0) + saddr_arr[i] = addr_arr[i]; +diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c +index 8e90a405939d..ef73f65224b1 100644 +--- a/drivers/dma/qcom/bam_dma.c ++++ b/drivers/dma/qcom/bam_dma.c +@@ -694,6 +694,25 @@ static int bam_dma_terminate_all(struct dma_chan *chan) + + /* remove all transactions, including active transaction */ + spin_lock_irqsave(&bchan->vc.lock, flag); ++ /* ++ * If we have transactions queued, then some might be committed to the ++ * hardware in the desc fifo. The only way to reset the desc fifo is ++ * to do a hardware reset (either by pipe or the entire block). ++ * bam_chan_init_hw() will trigger a pipe reset, and also reinit the ++ * pipe. If the pipe is left disabled (default state after pipe reset) ++ * and is accessed by a connected hardware engine, a fatal error in ++ * the BAM will occur. There is a small window where this could happen ++ * with bam_chan_init_hw(), but it is assumed that the caller has ++ * stopped activity on any attached hardware engine. Make sure to do ++ * this first so that the BAM hardware doesn't cause memory corruption ++ * by accessing freed resources. ++ */ ++ if (!list_empty(&bchan->desc_list)) { ++ async_desc = list_first_entry(&bchan->desc_list, ++ struct bam_async_desc, desc_node); ++ bam_chan_init_hw(bchan, async_desc->dir); ++ } ++ + list_for_each_entry_safe(async_desc, tmp, + &bchan->desc_list, desc_node) { + list_add(&async_desc->vd.node, &bchan->vc.desc_issued); +diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c +index b33cf6e8ab8e..d13fe1030a3e 100644 +--- a/drivers/dma/tegra210-adma.c ++++ b/drivers/dma/tegra210-adma.c +@@ -40,6 +40,7 @@ + #define ADMA_CH_CONFIG_MAX_BURST_SIZE 16 + #define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf) + #define ADMA_CH_CONFIG_MAX_BUFS 8 ++#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4) + + #define ADMA_CH_FIFO_CTRL 0x2c + #define TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(val) (((val) & 0xf) << 24) +@@ -85,6 +86,7 @@ struct tegra_adma; + * @ch_req_tx_shift: Register offset for AHUB transmit channel select. + * @ch_req_rx_shift: Register offset for AHUB receive channel select. + * @ch_base_offset: Register offset of DMA channel registers. ++ * @has_outstanding_reqs: If DMA channel can have outstanding requests. + * @ch_fifo_ctrl: Default value for channel FIFO CTRL register. + * @ch_req_mask: Mask for Tx or Rx channel select. + * @ch_req_max: Maximum number of Tx or Rx channels available. 
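The @has_outstanding_reqs field documented above follows the usual per-SoC
"chip data" pattern: a const table entry matched at probe time gates optional
register setup, as the hunks below do for Tegra186. A reduced sketch of that
pattern, with names abbreviated from this driver:

	#include <linux/types.h>

	struct chip_data {
		unsigned int ch_base_offset;
		bool has_outstanding_reqs;
	};

	static const struct chip_data tegra210_data = {
		.ch_base_offset = 0x0,
		.has_outstanding_reqs = false,	/* Tegra210 lacks the field */
	};

	static const struct chip_data tegra186_data = {
		.ch_base_offset = 0x10000,
		.has_outstanding_reqs = true,
	};

	/* The matched chip data gates the optional CONFIG bits. */
	static u32 build_ch_config(const struct chip_data *cdata, u32 config)
	{
		if (cdata->has_outstanding_reqs)
			config |= 8 << 4;	/* ..._CH_CONFIG_OUTSTANDING_REQS(8) */
		return config;
	}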
+@@ -103,6 +105,7 @@ struct tegra_adma_chip_data { + unsigned int ch_req_max; + unsigned int ch_reg_size; + unsigned int nr_channels; ++ bool has_outstanding_reqs; + }; + + /* +@@ -602,6 +605,8 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc, + ADMA_CH_CTRL_FLOWCTRL_EN; + ch_regs->config |= cdata->adma_get_burst_config(burst_size); + ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1); ++ if (cdata->has_outstanding_reqs) ++ ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8); + ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl; + ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK; + +@@ -786,6 +791,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = { + .ch_req_tx_shift = 28, + .ch_req_rx_shift = 24, + .ch_base_offset = 0, ++ .has_outstanding_reqs = false, + .ch_fifo_ctrl = TEGRA210_FIFO_CTRL_DEFAULT, + .ch_req_mask = 0xf, + .ch_req_max = 10, +@@ -800,6 +806,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = { + .ch_req_tx_shift = 27, + .ch_req_rx_shift = 22, + .ch_base_offset = 0x10000, ++ .has_outstanding_reqs = true, + .ch_fifo_ctrl = TEGRA186_FIFO_CTRL_DEFAULT, + .ch_req_mask = 0x1f, + .ch_req_max = 20, +diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c +index 2f946f55076c..8c2f7ebe998c 100644 +--- a/drivers/dma/ti/cppi41.c ++++ b/drivers/dma/ti/cppi41.c +@@ -586,9 +586,22 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( + enum dma_transfer_direction dir, unsigned long tx_flags, void *context) + { + struct cppi41_channel *c = to_cpp41_chan(chan); ++ struct dma_async_tx_descriptor *txd = NULL; ++ struct cppi41_dd *cdd = c->cdd; + struct cppi41_desc *d; + struct scatterlist *sg; + unsigned int i; ++ int error; ++ ++ error = pm_runtime_get(cdd->ddev.dev); ++ if (error < 0) { ++ pm_runtime_put_noidle(cdd->ddev.dev); ++ ++ return NULL; ++ } ++ ++ if (cdd->is_suspended) ++ goto err_out_not_ready; + + d = c->desc; + for_each_sg(sgl, sg, sg_len, i) { +@@ -611,7 +624,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( + d++; + } + +- return &c->txd; ++ txd = &c->txd; ++ ++err_out_not_ready: ++ pm_runtime_mark_last_busy(cdd->ddev.dev); ++ pm_runtime_put_autosuspend(cdd->ddev.dev); ++ ++ return txd; + } + + static void cppi41_compute_td_desc(struct cppi41_desc *d) +diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c +index addf0749dd8b..b1af0de2e100 100644 +--- a/drivers/firmware/efi/cper.c ++++ b/drivers/firmware/efi/cper.c +@@ -381,7 +381,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie, + printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx, + pcie->device_id.vendor_id, pcie->device_id.device_id); + p = pcie->device_id.class_code; +- printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]); ++ printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]); + } + if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER) + printk("%s""serial number: 0x%04x, 0x%04x\n", pfx, +diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c +index b7d89e30131e..06e8caaafa81 100644 +--- a/drivers/gpio/gpio-max77620.c ++++ b/drivers/gpio/gpio-max77620.c +@@ -192,13 +192,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio, + case 0: + val = MAX77620_CNFG_GPIO_DBNC_None; + break; +- case 1 ... 8: ++ case 1000 ... 8000: + val = MAX77620_CNFG_GPIO_DBNC_8ms; + break; +- case 9 ... 16: ++ case 9000 ... 16000: + val = MAX77620_CNFG_GPIO_DBNC_16ms; + break; +- case 17 ... 32: ++ case 17000 ... 
32000: + val = MAX77620_CNFG_GPIO_DBNC_32ms; + break; + default: +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +index 7bcf86c61999..61e38e43ad1d 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +@@ -270,7 +270,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, + + r = amdgpu_bo_create_list_entry_array(&args->in, &info); + if (r) +- goto error_free; ++ return r; + + switch (args->in.operation) { + case AMDGPU_BO_LIST_OP_CREATE: +@@ -283,8 +283,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, + r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL); + mutex_unlock(&fpriv->bo_list_lock); + if (r < 0) { +- amdgpu_bo_list_put(list); +- return r; ++ goto error_put_list; + } + + handle = r; +@@ -306,9 +305,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, + mutex_unlock(&fpriv->bo_list_lock); + + if (IS_ERR(old)) { +- amdgpu_bo_list_put(list); + r = PTR_ERR(old); +- goto error_free; ++ goto error_put_list; + } + + amdgpu_bo_list_put(old); +@@ -325,8 +323,10 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, + + return 0; + ++error_put_list: ++ amdgpu_bo_list_put(list); ++ + error_free: +- if (info) +- kvfree(info); ++ kvfree(info); + return r; + } +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +index f41287f9000d..8cd6a6f94542 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +@@ -67,7 +67,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] = + { + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100), +- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100), ++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100), +diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +index d605b4963f8a..141727ce7e76 100644 +--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +@@ -151,6 +151,15 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev) + WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp); + + tmp = mmGCVM_L2_CNTL3_DEFAULT; ++ if (adev->gmc.translate_further) { ++ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12); ++ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, ++ L2_CACHE_BIGK_FRAGMENT_SIZE, 9); ++ } else { ++ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9); ++ tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, ++ L2_CACHE_BIGK_FRAGMENT_SIZE, 6); ++ } + WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp); + + tmp = mmGCVM_L2_CNTL4_DEFAULT; +diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +index 0f9549f19ade..9e5c3a1909c7 100644 +--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +@@ -137,6 +137,15 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev) + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp); + + tmp = mmMMVM_L2_CNTL3_DEFAULT; ++ if (adev->gmc.translate_further) { ++ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12); ++ tmp = REG_SET_FIELD(tmp, 
MMVM_L2_CNTL3, ++ L2_CACHE_BIGK_FRAGMENT_SIZE, 9); ++ } else { ++ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9); ++ tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, ++ L2_CACHE_BIGK_FRAGMENT_SIZE, 6); ++ } + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp); + + tmp = mmMMVM_L2_CNTL4_DEFAULT; +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +index 4428018672d3..4f14ef813dda 100644 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +@@ -159,6 +159,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), ++ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000) + }; + + static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +index 3be8eb21fd6e..64be81eea9b4 100644 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +@@ -5097,9 +5097,7 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr, + + if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) { + podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; +- for (i = 0; i < podn_vdd_dep->count - 1; i++) +- od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc; +- if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc) ++ for (i = 0; i < podn_vdd_dep->count; i++) + od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc; + } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) { + podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; +diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c +index 9b61fae5aef7..dae45b6a35b7 100644 +--- a/drivers/gpu/drm/i915/display/intel_display.c ++++ b/drivers/gpu/drm/i915/display/intel_display.c +@@ -9186,7 +9186,6 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, + static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) + { + struct intel_encoder *encoder; +- bool pch_ssc_in_use = false; + bool has_fdi = false; + + for_each_intel_encoder(&dev_priv->drm, encoder) { +@@ -9214,22 +9213,24 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) + * clock hierarchy. That would also allow us to do + * clock bending finally. 
+ */ ++ dev_priv->pch_ssc_use = 0; ++ + if (spll_uses_pch_ssc(dev_priv)) { + DRM_DEBUG_KMS("SPLL using PCH SSC\n"); +- pch_ssc_in_use = true; ++ dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); + } + + if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { + DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n"); +- pch_ssc_in_use = true; ++ dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); + } + + if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { + DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n"); +- pch_ssc_in_use = true; ++ dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); + } + +- if (pch_ssc_in_use) ++ if (dev_priv->pch_ssc_use) + return; + + if (has_fdi) { +diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +index 2d4e7b9a7b9d..f199a6769962 100644 +--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c ++++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +@@ -498,16 +498,31 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, + val = I915_READ(WRPLL_CTL(id)); + I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE); + POSTING_READ(WRPLL_CTL(id)); ++ ++ /* ++ * Try to set up the PCH reference clock once all DPLLs ++ * that depend on it have been shut down. ++ */ ++ if (dev_priv->pch_ssc_use & BIT(id)) ++ intel_init_pch_refclk(dev_priv); + } + + static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) + { ++ enum intel_dpll_id id = pll->info->id; + u32 val; + + val = I915_READ(SPLL_CTL); + I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); + POSTING_READ(SPLL_CTL); ++ ++ /* ++ * Try to set up the PCH reference clock once all DPLLs ++ * that depend on it have been shut down. ++ */ ++ if (dev_priv->pch_ssc_use & BIT(id)) ++ intel_init_pch_refclk(dev_priv); + } + + static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index 94b91a952699..edb88406cb75 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -1881,6 +1881,8 @@ struct drm_i915_private { + struct work_struct idle_work; + } gem; + ++ u8 pch_ssc_use; ++ + /* For i945gm vblank irq vs. 
C3 workaround */ + struct { + struct work_struct work; +diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c +index 6654c1550e2e..fbe4e16ab029 100644 +--- a/drivers/hid/hid-axff.c ++++ b/drivers/hid/hid-axff.c +@@ -63,13 +63,20 @@ static int axff_init(struct hid_device *hid) + { + struct axff_device *axff; + struct hid_report *report; +- struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list); ++ struct hid_input *hidinput; + struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list; +- struct input_dev *dev = hidinput->input; ++ struct input_dev *dev; + int field_count = 0; + int i, j; + int error; + ++ if (list_empty(&hid->inputs)) { ++ hid_err(hid, "no inputs found\n"); ++ return -ENODEV; ++ } ++ hidinput = list_first_entry(&hid->inputs, struct hid_input, list); ++ dev = hidinput->input; ++ + if (list_empty(report_list)) { + hid_err(hid, "no output reports found\n"); + return -ENODEV; +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c +index 210b81a56e1a..3af76624e4aa 100644 +--- a/drivers/hid/hid-core.c ++++ b/drivers/hid/hid-core.c +@@ -1139,6 +1139,7 @@ int hid_open_report(struct hid_device *device) + __u8 *start; + __u8 *buf; + __u8 *end; ++ __u8 *next; + int ret; + static int (*dispatch_type[])(struct hid_parser *parser, + struct hid_item *item) = { +@@ -1192,7 +1193,8 @@ int hid_open_report(struct hid_device *device) + device->collection_size = HID_DEFAULT_NUM_COLLECTIONS; + + ret = -EINVAL; +- while ((start = fetch_item(start, end, &item)) != NULL) { ++ while ((next = fetch_item(start, end, &item)) != NULL) { ++ start = next; + + if (item.format != HID_ITEM_FORMAT_SHORT) { + hid_err(device, "unexpected long global item\n"); +@@ -1230,7 +1232,8 @@ int hid_open_report(struct hid_device *device) + } + } + +- hid_err(device, "item fetching failed at offset %d\n", (int)(end - start)); ++ hid_err(device, "item fetching failed at offset %u/%u\n", ++ size - (unsigned int)(end - start), size); + err: + kfree(parser->collection_stack); + alloc_err: +diff --git a/drivers/hid/hid-dr.c b/drivers/hid/hid-dr.c +index 17e17f9a597b..947f19f8685f 100644 +--- a/drivers/hid/hid-dr.c ++++ b/drivers/hid/hid-dr.c +@@ -75,13 +75,19 @@ static int drff_init(struct hid_device *hid) + { + struct drff_device *drff; + struct hid_report *report; +- struct hid_input *hidinput = list_first_entry(&hid->inputs, +- struct hid_input, list); ++ struct hid_input *hidinput; + struct list_head *report_list = + &hid->report_enum[HID_OUTPUT_REPORT].report_list; +- struct input_dev *dev = hidinput->input; ++ struct input_dev *dev; + int error; + ++ if (list_empty(&hid->inputs)) { ++ hid_err(hid, "no inputs found\n"); ++ return -ENODEV; ++ } ++ hidinput = list_first_entry(&hid->inputs, struct hid_input, list); ++ dev = hidinput->input; ++ + if (list_empty(report_list)) { + hid_err(hid, "no output reports found\n"); + return -ENODEV; +diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c +index 7cd5651872d3..c34f2e5a049f 100644 +--- a/drivers/hid/hid-emsff.c ++++ b/drivers/hid/hid-emsff.c +@@ -47,13 +47,19 @@ static int emsff_init(struct hid_device *hid) + { + struct emsff_device *emsff; + struct hid_report *report; +- struct hid_input *hidinput = list_first_entry(&hid->inputs, +- struct hid_input, list); ++ struct hid_input *hidinput; + struct list_head *report_list = + &hid->report_enum[HID_OUTPUT_REPORT].report_list; +- struct input_dev *dev = hidinput->input; ++ struct input_dev *dev; + int error; + ++ if (list_empty(&hid->inputs)) { ++ 
hid_err(hid, "no inputs found\n"); ++ return -ENODEV; ++ } ++ hidinput = list_first_entry(&hid->inputs, struct hid_input, list); ++ dev = hidinput->input; ++ + if (list_empty(report_list)) { + hid_err(hid, "no output reports found\n"); + return -ENODEV; +diff --git a/drivers/hid/hid-gaff.c b/drivers/hid/hid-gaff.c +index 0f95c96b70f8..ecbd3995a4eb 100644 +--- a/drivers/hid/hid-gaff.c ++++ b/drivers/hid/hid-gaff.c +@@ -64,14 +64,20 @@ static int gaff_init(struct hid_device *hid) + { + struct gaff_device *gaff; + struct hid_report *report; +- struct hid_input *hidinput = list_entry(hid->inputs.next, +- struct hid_input, list); ++ struct hid_input *hidinput; + struct list_head *report_list = + &hid->report_enum[HID_OUTPUT_REPORT].report_list; + struct list_head *report_ptr = report_list; +- struct input_dev *dev = hidinput->input; ++ struct input_dev *dev; + int error; + ++ if (list_empty(&hid->inputs)) { ++ hid_err(hid, "no inputs found\n"); ++ return -ENODEV; ++ } ++ hidinput = list_entry(hid->inputs.next, struct hid_input, list); ++ dev = hidinput->input; ++ + if (list_empty(report_list)) { + hid_err(hid, "no output reports found\n"); + return -ENODEV; +diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c +index 10a720558830..8619b80c834c 100644 +--- a/drivers/hid/hid-holtekff.c ++++ b/drivers/hid/hid-holtekff.c +@@ -124,13 +124,19 @@ static int holtekff_init(struct hid_device *hid) + { + struct holtekff_device *holtekff; + struct hid_report *report; +- struct hid_input *hidinput = list_entry(hid->inputs.next, +- struct hid_input, list); ++ struct hid_input *hidinput; + struct list_head *report_list = + &hid->report_enum[HID_OUTPUT_REPORT].report_list; +- struct input_dev *dev = hidinput->input; ++ struct input_dev *dev; + int error; + ++ if (list_empty(&hid->inputs)) { ++ hid_err(hid, "no inputs found\n"); ++ return -ENODEV; ++ } ++ hidinput = list_entry(hid->inputs.next, struct hid_input, list); ++ dev = hidinput->input; ++ + if (list_empty(report_list)) { + hid_err(hid, "no output report found\n"); + return -ENODEV; +diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c +index 7795831d37c2..f36316320075 100644 +--- a/drivers/hid/hid-hyperv.c ++++ b/drivers/hid/hid-hyperv.c +@@ -314,60 +314,24 @@ static void mousevsc_on_receive(struct hv_device *device, + + static void mousevsc_on_channel_callback(void *context) + { +- const int packet_size = 0x100; +- int ret; + struct hv_device *device = context; +- u32 bytes_recvd; +- u64 req_id; + struct vmpacket_descriptor *desc; +- unsigned char *buffer; +- int bufferlen = packet_size; +- +- buffer = kmalloc(bufferlen, GFP_ATOMIC); +- if (!buffer) +- return; +- +- do { +- ret = vmbus_recvpacket_raw(device->channel, buffer, +- bufferlen, &bytes_recvd, &req_id); +- +- switch (ret) { +- case 0: +- if (bytes_recvd <= 0) { +- kfree(buffer); +- return; +- } +- desc = (struct vmpacket_descriptor *)buffer; +- +- switch (desc->type) { +- case VM_PKT_COMP: +- break; +- +- case VM_PKT_DATA_INBAND: +- mousevsc_on_receive(device, desc); +- break; +- +- default: +- pr_err("unhandled packet type %d, tid %llx len %d\n", +- desc->type, req_id, bytes_recvd); +- break; +- } + ++ foreach_vmbus_pkt(desc, device->channel) { ++ switch (desc->type) { ++ case VM_PKT_COMP: + break; + +- case -ENOBUFS: +- kfree(buffer); +- /* Handle large packet */ +- bufferlen = bytes_recvd; +- buffer = kmalloc(bytes_recvd, GFP_ATOMIC); +- +- if (!buffer) +- return; ++ case VM_PKT_DATA_INBAND: ++ mousevsc_on_receive(device, desc); ++ break; + ++ default: ++ 
pr_err("Unhandled packet type %d, tid %llx len %d\n", ++ desc->type, desc->trans_id, desc->len8 * 8); + break; + } +- } while (1); +- ++ } + } + + static int mousevsc_connect_to_vsp(struct hv_device *device) +diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c +index dd1a6c3a7de6..73d07e35f12a 100644 +--- a/drivers/hid/hid-lg2ff.c ++++ b/drivers/hid/hid-lg2ff.c +@@ -50,11 +50,17 @@ int lg2ff_init(struct hid_device *hid) + { + struct lg2ff_device *lg2ff; + struct hid_report *report; +- struct hid_input *hidinput = list_entry(hid->inputs.next, +- struct hid_input, list); +- struct input_dev *dev = hidinput->input; ++ struct hid_input *hidinput; ++ struct input_dev *dev; + int error; + ++ if (list_empty(&hid->inputs)) { ++ hid_err(hid, "no inputs found\n"); ++ return -ENODEV; ++ } ++ hidinput = list_entry(hid->inputs.next, struct hid_input, list); ++ dev = hidinput->input; ++ + /* Check that the report looks ok */ + report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7); + if (!report) +diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c +index 9ecb6fd06203..b7e1949f3cf7 100644 +--- a/drivers/hid/hid-lg3ff.c ++++ b/drivers/hid/hid-lg3ff.c +@@ -117,12 +117,19 @@ static const signed short ff3_joystick_ac[] = { + + int lg3ff_init(struct hid_device *hid) + { +- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); +- struct input_dev *dev = hidinput->input; ++ struct hid_input *hidinput; ++ struct input_dev *dev; + const signed short *ff_bits = ff3_joystick_ac; + int error; + int i; + ++ if (list_empty(&hid->inputs)) { ++ hid_err(hid, "no inputs found\n"); ++ return -ENODEV; ++ } ++ hidinput = list_entry(hid->inputs.next, struct hid_input, list); ++ dev = hidinput->input; ++ + /* Check that the report looks ok */ + if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35)) + return -ENODEV; +diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c +index 03f0220062ca..5e6a0cef2a06 100644 +--- a/drivers/hid/hid-lg4ff.c ++++ b/drivers/hid/hid-lg4ff.c +@@ -1253,8 +1253,8 @@ static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_produc + + int lg4ff_init(struct hid_device *hid) + { +- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); +- struct input_dev *dev = hidinput->input; ++ struct hid_input *hidinput; ++ struct input_dev *dev; + struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; + struct hid_report *report = list_entry(report_list->next, struct hid_report, list); + const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor); +@@ -1266,6 +1266,13 @@ int lg4ff_init(struct hid_device *hid) + int mmode_ret, mmode_idx = -1; + u16 real_product_id; + ++ if (list_empty(&hid->inputs)) { ++ hid_err(hid, "no inputs found\n"); ++ return -ENODEV; ++ } ++ hidinput = list_entry(hid->inputs.next, struct hid_input, list); ++ dev = hidinput->input; ++ + /* Check that the report looks ok */ + if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7)) + return -1; +diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c +index c79a6ec43745..aed4ddc397a9 100644 +--- a/drivers/hid/hid-lgff.c ++++ b/drivers/hid/hid-lgff.c +@@ -115,12 +115,19 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude) + + int lgff_init(struct hid_device* hid) + { +- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); +- struct input_dev *dev = hidinput->input; ++ struct hid_input *hidinput; ++ struct input_dev *dev; + 
const signed short *ff_bits = ff_joystick; + int error; + int i; + ++ if (list_empty(&hid->inputs)) { ++ hid_err(hid, "no inputs found\n"); ++ return -ENODEV; ++ } ++ hidinput = list_entry(hid->inputs.next, struct hid_input, list); ++ dev = hidinput->input; ++ + /* Check that the report looks ok */ + if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7)) + return -ENODEV; +diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c +index 0179f7ed77e5..8e91e2f06cb4 100644 +--- a/drivers/hid/hid-logitech-hidpp.c ++++ b/drivers/hid/hid-logitech-hidpp.c +@@ -1669,6 +1669,7 @@ static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev, + + #define HIDPP_FF_EFFECTID_NONE -1 + #define HIDPP_FF_EFFECTID_AUTOCENTER -2 ++#define HIDPP_AUTOCENTER_PARAMS_LENGTH 18 + + #define HIDPP_FF_MAX_PARAMS 20 + #define HIDPP_FF_RESERVED_SLOTS 1 +@@ -2009,7 +2010,7 @@ static int hidpp_ff_erase_effect(struct input_dev *dev, int effect_id) + static void hidpp_ff_set_autocenter(struct input_dev *dev, u16 magnitude) + { + struct hidpp_ff_private_data *data = dev->ff->private; +- u8 params[18]; ++ u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH]; + + dbg_hid("Setting autocenter to %d.\n", magnitude); + +@@ -2077,23 +2078,34 @@ static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, hidpp + static void hidpp_ff_destroy(struct ff_device *ff) + { + struct hidpp_ff_private_data *data = ff->private; ++ struct hid_device *hid = data->hidpp->hid_dev; + ++ hid_info(hid, "Unloading HID++ force feedback.\n"); ++ ++ device_remove_file(&hid->dev, &dev_attr_range); ++ destroy_workqueue(data->wq); + kfree(data->effect_ids); + } + +-static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) ++static int hidpp_ff_init(struct hidpp_device *hidpp, ++ struct hidpp_ff_private_data *data) + { + struct hid_device *hid = hidpp->hid_dev; +- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); +- struct input_dev *dev = hidinput->input; ++ struct hid_input *hidinput; ++ struct input_dev *dev; + const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor); + const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice); + struct ff_device *ff; +- struct hidpp_report response; +- struct hidpp_ff_private_data *data; +- int error, j, num_slots; ++ int error, j, num_slots = data->num_effects; + u8 version; + ++ if (list_empty(&hid->inputs)) { ++ hid_err(hid, "no inputs found\n"); ++ return -ENODEV; ++ } ++ hidinput = list_entry(hid->inputs.next, struct hid_input, list); ++ dev = hidinput->input; ++ + if (!dev) { + hid_err(hid, "Struct input_dev not set!\n"); + return -EINVAL; +@@ -2109,27 +2121,17 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) + for (j = 0; hidpp_ff_effects_v2[j] >= 0; j++) + set_bit(hidpp_ff_effects_v2[j], dev->ffbit); + +- /* Read number of slots available in device */ +- error = hidpp_send_fap_command_sync(hidpp, feature_index, +- HIDPP_FF_GET_INFO, NULL, 0, &response); +- if (error) { +- if (error < 0) +- return error; +- hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", +- __func__, error); +- return -EPROTO; +- } +- +- num_slots = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS; +- + error = input_ff_create(dev, num_slots); + + if (error) { + hid_err(dev, "Failed to create FF device!\n"); + return error; + } +- +- data = kzalloc(sizeof(*data), GFP_KERNEL); ++ /* ++ * Create a copy of passed data, so we can transfer memory ++ * ownership to FF core ++ */ ++ data = kmemdup(data, 
sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + data->effect_ids = kcalloc(num_slots, sizeof(int), GFP_KERNEL); +@@ -2145,10 +2147,7 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) + } + + data->hidpp = hidpp; +- data->feature_index = feature_index; + data->version = version; +- data->slot_autocenter = 0; +- data->num_effects = num_slots; + for (j = 0; j < num_slots; j++) + data->effect_ids[j] = -1; + +@@ -2162,68 +2161,20 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index) + ff->set_autocenter = hidpp_ff_set_autocenter; + ff->destroy = hidpp_ff_destroy; + +- +- /* reset all forces */ +- error = hidpp_send_fap_command_sync(hidpp, feature_index, +- HIDPP_FF_RESET_ALL, NULL, 0, &response); +- +- /* Read current Range */ +- error = hidpp_send_fap_command_sync(hidpp, feature_index, +- HIDPP_FF_GET_APERTURE, NULL, 0, &response); +- if (error) +- hid_warn(hidpp->hid_dev, "Failed to read range from device!\n"); +- data->range = error ? 900 : get_unaligned_be16(&response.fap.params[0]); +- + /* Create sysfs interface */ + error = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range); + if (error) + hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d!\n", error); + +- /* Read the current gain values */ +- error = hidpp_send_fap_command_sync(hidpp, feature_index, +- HIDPP_FF_GET_GLOBAL_GAINS, NULL, 0, &response); +- if (error) +- hid_warn(hidpp->hid_dev, "Failed to read gain values from device!\n"); +- data->gain = error ? 0xffff : get_unaligned_be16(&response.fap.params[0]); +- /* ignore boost value at response.fap.params[2] */ +- + /* init the hardware command queue */ + atomic_set(&data->workqueue_size, 0); + +- /* initialize with zero autocenter to get wheel in usable state */ +- hidpp_ff_set_autocenter(dev, 0); +- + hid_info(hid, "Force feedback support loaded (firmware release %d).\n", + version); + + return 0; + } + +-static int hidpp_ff_deinit(struct hid_device *hid) +-{ +- struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); +- struct input_dev *dev = hidinput->input; +- struct hidpp_ff_private_data *data; +- +- if (!dev) { +- hid_err(hid, "Struct input_dev not found!\n"); +- return -EINVAL; +- } +- +- hid_info(hid, "Unloading HID++ force feedback.\n"); +- data = dev->ff->private; +- if (!data) { +- hid_err(hid, "Private data not found!\n"); +- return -EINVAL; +- } +- +- destroy_workqueue(data->wq); +- device_remove_file(&hid->dev, &dev_attr_range); +- +- return 0; +-} +- +- + /* ************************************************************************** */ + /* */ + /* Device Support */ +@@ -2725,24 +2676,93 @@ static int k400_connect(struct hid_device *hdev, bool connected) + + #define HIDPP_PAGE_G920_FORCE_FEEDBACK 0x8123 + +-static int g920_get_config(struct hidpp_device *hidpp) ++static int g920_ff_set_autocenter(struct hidpp_device *hidpp, ++ struct hidpp_ff_private_data *data) + { ++ struct hidpp_report response; ++ u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH] = { ++ [1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART, ++ }; ++ int ret; ++ ++ /* initialize with zero autocenter to get wheel in usable state */ ++ ++ dbg_hid("Setting autocenter to 0.\n"); ++ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, ++ HIDPP_FF_DOWNLOAD_EFFECT, ++ params, ARRAY_SIZE(params), ++ &response); ++ if (ret) ++ hid_warn(hidpp->hid_dev, "Failed to autocenter device!\n"); ++ else ++ data->slot_autocenter = response.fap.params[0]; ++ ++ return ret; ++} ++ ++static 
int g920_get_config(struct hidpp_device *hidpp, ++ struct hidpp_ff_private_data *data) ++{ ++ struct hidpp_report response; + u8 feature_type; +- u8 feature_index; + int ret; + ++ memset(data, 0, sizeof(*data)); ++ + /* Find feature and store for later use */ + ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK, +- &feature_index, &feature_type); ++ &data->feature_index, &feature_type); + if (ret) + return ret; + +- ret = hidpp_ff_init(hidpp, feature_index); ++ /* Read number of slots available in device */ ++ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, ++ HIDPP_FF_GET_INFO, ++ NULL, 0, ++ &response); ++ if (ret) { ++ if (ret < 0) ++ return ret; ++ hid_err(hidpp->hid_dev, ++ "%s: received protocol error 0x%02x\n", __func__, ret); ++ return -EPROTO; ++ } ++ ++ data->num_effects = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS; ++ ++ /* reset all forces */ ++ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, ++ HIDPP_FF_RESET_ALL, ++ NULL, 0, ++ &response); + if (ret) +- hid_warn(hidpp->hid_dev, "Unable to initialize force feedback support, errno %d\n", +- ret); ++ hid_warn(hidpp->hid_dev, "Failed to reset all forces!\n"); + +- return 0; ++ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, ++ HIDPP_FF_GET_APERTURE, ++ NULL, 0, ++ &response); ++ if (ret) { ++ hid_warn(hidpp->hid_dev, ++ "Failed to read range from device!\n"); ++ } ++ data->range = ret ? ++ 900 : get_unaligned_be16(&response.fap.params[0]); ++ ++ /* Read the current gain values */ ++ ret = hidpp_send_fap_command_sync(hidpp, data->feature_index, ++ HIDPP_FF_GET_GLOBAL_GAINS, ++ NULL, 0, ++ &response); ++ if (ret) ++ hid_warn(hidpp->hid_dev, ++ "Failed to read gain values from device!\n"); ++ data->gain = ret ? ++ 0xffff : get_unaligned_be16(&response.fap.params[0]); ++ ++ /* ignore boost value at response.fap.params[2] */ ++ ++ return g920_ff_set_autocenter(hidpp, data); + } + + /* -------------------------------------------------------------------------- */ +@@ -3458,34 +3478,45 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id) + return report->field[0]->report_count + 1; + } + +-static bool hidpp_validate_report(struct hid_device *hdev, int id, +- int expected_length, bool optional) ++static bool hidpp_validate_device(struct hid_device *hdev) + { +- int report_length; ++ struct hidpp_device *hidpp = hid_get_drvdata(hdev); ++ int id, report_length, supported_reports = 0; + +- if (id >= HID_MAX_IDS || id < 0) { +- hid_err(hdev, "invalid HID report id %u\n", id); +- return false; ++ id = REPORT_ID_HIDPP_SHORT; ++ report_length = hidpp_get_report_length(hdev, id); ++ if (report_length) { ++ if (report_length < HIDPP_REPORT_SHORT_LENGTH) ++ goto bad_device; ++ ++ supported_reports++; + } + ++ id = REPORT_ID_HIDPP_LONG; + report_length = hidpp_get_report_length(hdev, id); +- if (!report_length) +- return optional; ++ if (report_length) { ++ if (report_length < HIDPP_REPORT_LONG_LENGTH) ++ goto bad_device; + +- if (report_length < expected_length) { +- hid_warn(hdev, "not enough values in hidpp report %d\n", id); +- return false; ++ supported_reports++; + } + +- return true; +-} ++ id = REPORT_ID_HIDPP_VERY_LONG; ++ report_length = hidpp_get_report_length(hdev, id); ++ if (report_length) { ++ if (report_length < HIDPP_REPORT_LONG_LENGTH || ++ report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH) ++ goto bad_device; + +-static bool hidpp_validate_device(struct hid_device *hdev) +-{ +- return hidpp_validate_report(hdev, REPORT_ID_HIDPP_SHORT, +- 
HIDPP_REPORT_SHORT_LENGTH, false) && +- hidpp_validate_report(hdev, REPORT_ID_HIDPP_LONG, +- HIDPP_REPORT_LONG_LENGTH, true); ++ supported_reports++; ++ hidpp->very_long_report_length = report_length; ++ } ++ ++ return supported_reports; ++ ++bad_device: ++ hid_warn(hdev, "not enough values in hidpp report %d\n", id); ++ return false; + } + + static bool hidpp_application_equals(struct hid_device *hdev, +@@ -3505,6 +3536,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) + int ret; + bool connected; + unsigned int connect_mask = HID_CONNECT_DEFAULT; ++ struct hidpp_ff_private_data data; + + /* report_fixup needs drvdata to be set before we call hid_parse */ + hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL); +@@ -3531,11 +3563,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) + return hid_hw_start(hdev, HID_CONNECT_DEFAULT); + } + +- hidpp->very_long_report_length = +- hidpp_get_report_length(hdev, REPORT_ID_HIDPP_VERY_LONG); +- if (hidpp->very_long_report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH) +- hidpp->very_long_report_length = HIDPP_REPORT_VERY_LONG_MAX_LENGTH; +- + if (id->group == HID_GROUP_LOGITECH_DJ_DEVICE) + hidpp->quirks |= HIDPP_QUIRK_UNIFYING; + +@@ -3614,7 +3641,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) + if (ret) + goto hid_hw_init_fail; + } else if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) { +- ret = g920_get_config(hidpp); ++ ret = g920_get_config(hidpp, &data); + if (ret) + goto hid_hw_init_fail; + } +@@ -3636,6 +3663,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) + goto hid_hw_start_fail; + } + ++ if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) { ++ ret = hidpp_ff_init(hidpp, &data); ++ if (ret) ++ hid_warn(hidpp->hid_dev, ++ "Unable to initialize force feedback support, errno %d\n", ++ ret); ++ } ++ + return ret; + + hid_hw_init_fail: +@@ -3658,9 +3693,6 @@ static void hidpp_remove(struct hid_device *hdev) + + sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group); + +- if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) +- hidpp_ff_deinit(hdev); +- + hid_hw_stop(hdev); + cancel_work_sync(&hidpp->work); + mutex_destroy(&hidpp->send_mutex); +diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c +index 8b3a922bdad3..572b5789d20f 100644 +--- a/drivers/hid/hid-microsoft.c ++++ b/drivers/hid/hid-microsoft.c +@@ -328,11 +328,17 @@ static int ms_play_effect(struct input_dev *dev, void *data, + + static int ms_init_ff(struct hid_device *hdev) + { +- struct hid_input *hidinput = list_entry(hdev->inputs.next, +- struct hid_input, list); +- struct input_dev *input_dev = hidinput->input; ++ struct hid_input *hidinput; ++ struct input_dev *input_dev; + struct ms_data *ms = hid_get_drvdata(hdev); + ++ if (list_empty(&hdev->inputs)) { ++ hid_err(hdev, "no inputs found\n"); ++ return -ENODEV; ++ } ++ hidinput = list_entry(hdev->inputs.next, struct hid_input, list); ++ input_dev = hidinput->input; ++ + if (!(ms->quirks & MS_QUIRK_FF)) + return 0; + +diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c +index 73c0f7a95e2d..4c6ed6ef31f1 100644 +--- a/drivers/hid/hid-sony.c ++++ b/drivers/hid/hid-sony.c +@@ -2254,9 +2254,15 @@ static int sony_play_effect(struct input_dev *dev, void *data, + + static int sony_init_ff(struct sony_sc *sc) + { +- struct hid_input *hidinput = list_entry(sc->hdev->inputs.next, +- struct hid_input, list); +- struct input_dev *input_dev = hidinput->input; ++ struct 
hid_input *hidinput; ++ struct input_dev *input_dev; ++ ++ if (list_empty(&sc->hdev->inputs)) { ++ hid_err(sc->hdev, "no inputs found\n"); ++ return -ENODEV; ++ } ++ hidinput = list_entry(sc->hdev->inputs.next, struct hid_input, list); ++ input_dev = hidinput->input; + + input_set_capability(input_dev, EV_FF, FF_RUMBLE); + return input_ff_create_memless(input_dev, NULL, sony_play_effect); +diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c +index bdfc5ff3b2c5..90acef304536 100644 +--- a/drivers/hid/hid-tmff.c ++++ b/drivers/hid/hid-tmff.c +@@ -124,12 +124,18 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits) + struct tmff_device *tmff; + struct hid_report *report; + struct list_head *report_list; +- struct hid_input *hidinput = list_entry(hid->inputs.next, +- struct hid_input, list); +- struct input_dev *input_dev = hidinput->input; ++ struct hid_input *hidinput; ++ struct input_dev *input_dev; + int error; + int i; + ++ if (list_empty(&hid->inputs)) { ++ hid_err(hid, "no inputs found\n"); ++ return -ENODEV; ++ } ++ hidinput = list_entry(hid->inputs.next, struct hid_input, list); ++ input_dev = hidinput->input; ++ + tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL); + if (!tmff) + return -ENOMEM; +diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c +index f90959e94028..3abaca045869 100644 +--- a/drivers/hid/hid-zpff.c ++++ b/drivers/hid/hid-zpff.c +@@ -54,11 +54,17 @@ static int zpff_init(struct hid_device *hid) + { + struct zpff_device *zpff; + struct hid_report *report; +- struct hid_input *hidinput = list_entry(hid->inputs.next, +- struct hid_input, list); +- struct input_dev *dev = hidinput->input; ++ struct hid_input *hidinput; ++ struct input_dev *dev; + int i, error; + ++ if (list_empty(&hid->inputs)) { ++ hid_err(hid, "no inputs found\n"); ++ return -ENODEV; ++ } ++ hidinput = list_entry(hid->inputs.next, struct hid_input, list); ++ dev = hidinput->input; ++ + for (i = 0; i < 4; i++) { + report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1); + if (!report) +diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +index 75078c83be1a..d31ea82b84c1 100644 +--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c ++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c +@@ -322,6 +322,25 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = { + }, + .driver_data = (void *)&sipodev_desc + }, ++ { ++ /* ++ * There are at least 2 Primebook C11B versions, the older ++ * version has a product-name of "Primebook C11B", and a ++ * bios version / release / firmware revision of: ++ * V2.1.2 / 05/03/2018 / 18.2 ++ * The new version has "PRIMEBOOK C11B" as product-name and a ++ * bios version / release / firmware revision of: ++ * CFALKSW05_BIOS_V1.1.2 / 11/19/2018 / 19.2 ++ * Only the older version needs this quirk, note the newer ++ * version will not match as it has a different product-name. 
++ */ ++ .ident = "Trekstor Primebook C11B", ++ .matches = { ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"), ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11B"), ++ }, ++ .driver_data = (void *)&sipodev_desc ++ }, + { + .ident = "Direkt-Tek DTLAPY116-2", + .matches = { +diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c +index cf6c0e3a83d3..121b4e89f038 100644 +--- a/drivers/iio/accel/bmc150-accel-core.c ++++ b/drivers/iio/accel/bmc150-accel-core.c +@@ -117,7 +117,7 @@ + #define BMC150_ACCEL_SLEEP_1_SEC 0x0F + + #define BMC150_ACCEL_REG_TEMP 0x08 +-#define BMC150_ACCEL_TEMP_CENTER_VAL 24 ++#define BMC150_ACCEL_TEMP_CENTER_VAL 23 + + #define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2)) + #define BMC150_AUTO_SUSPEND_DELAY_MS 2000 +diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c +index 7b28d045d271..7b27306330a3 100644 +--- a/drivers/iio/adc/meson_saradc.c ++++ b/drivers/iio/adc/meson_saradc.c +@@ -1219,6 +1219,11 @@ static int meson_sar_adc_probe(struct platform_device *pdev) + if (IS_ERR(base)) + return PTR_ERR(base); + ++ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, ++ priv->param->regmap_config); ++ if (IS_ERR(priv->regmap)) ++ return PTR_ERR(priv->regmap); ++ + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (!irq) + return -EINVAL; +@@ -1228,11 +1233,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev) + if (ret) + return ret; + +- priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, +- priv->param->regmap_config); +- if (IS_ERR(priv->regmap)) +- return PTR_ERR(priv->regmap); +- + priv->clkin = devm_clk_get(&pdev->dev, "clkin"); + if (IS_ERR(priv->clkin)) { + dev_err(&pdev->dev, "failed to get clkin\n"); +diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c +index 9ac8356d9a95..4998a89d083d 100644 +--- a/drivers/iio/imu/adis_buffer.c ++++ b/drivers/iio/imu/adis_buffer.c +@@ -35,8 +35,11 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev, + return -ENOMEM; + + adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL); +- if (!adis->buffer) ++ if (!adis->buffer) { ++ kfree(adis->xfer); ++ adis->xfer = NULL; + return -ENOMEM; ++ } + + tx = adis->buffer + burst_length; + tx[0] = ADIS_READ_REG(adis->burst->reg_cmd); +@@ -78,8 +81,11 @@ int adis_update_scan_mode(struct iio_dev *indio_dev, + return -ENOMEM; + + adis->buffer = kcalloc(indio_dev->scan_bytes, 2, GFP_KERNEL); +- if (!adis->buffer) ++ if (!adis->buffer) { ++ kfree(adis->xfer); ++ adis->xfer = NULL; + return -ENOMEM; ++ } + + rx = adis->buffer; + tx = rx + scan_count; +diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c +index 66fbcd94642d..4c754a02717b 100644 +--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c ++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c +@@ -92,9 +92,11 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = { + static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw) + { + struct st_lsm6dsx_sensor *sensor; ++ u16 odr; + + sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]); +- msleep((2000U / sensor->odr) + 1); ++ odr = (hw->enable_mask & BIT(ST_LSM6DSX_ID_ACC)) ? 
sensor->odr : 13; ++ msleep((2000U / odr) + 1); + } + + /** +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c +index da10e6ccb43c..5920c0085d35 100644 +--- a/drivers/infiniband/core/cm.c ++++ b/drivers/infiniband/core/cm.c +@@ -4399,6 +4399,7 @@ error2: + error1: + port_modify.set_port_cap_mask = 0; + port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; ++ kfree(port); + while (--i) { + if (!rdma_cap_ib_cm(ib_device, i)) + continue; +@@ -4407,6 +4408,7 @@ error1: + ib_modify_port(ib_device, port->port_num, 0, &port_modify); + ib_unregister_mad_agent(port->mad_agent); + cm_remove_port_fs(port); ++ kfree(port); + } + free: + kfree(cm_dev); +@@ -4460,6 +4462,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) + spin_unlock_irq(&cm.state_lock); + ib_unregister_mad_agent(cur_mad_agent); + cm_remove_port_fs(port); ++ kfree(port); + } + + kfree(cm_dev); +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c +index a68d0ccf67a4..2e48b59926c1 100644 +--- a/drivers/infiniband/core/cma.c ++++ b/drivers/infiniband/core/cma.c +@@ -2396,9 +2396,10 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, + conn_id->cm_id.iw = NULL; + cma_exch(conn_id, RDMA_CM_DESTROYING); + mutex_unlock(&conn_id->handler_mutex); ++ mutex_unlock(&listen_id->handler_mutex); + cma_deref_id(conn_id); + rdma_destroy_id(&conn_id->id); +- goto out; ++ return ret; + } + + mutex_unlock(&conn_id->handler_mutex); +diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c +index 020c26976558..f42e856f3072 100644 +--- a/drivers/infiniband/core/nldev.c ++++ b/drivers/infiniband/core/nldev.c +@@ -1230,7 +1230,7 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh, + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) { + ret = -ENOMEM; +- goto err; ++ goto err_get; + } + + nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, +@@ -1787,10 +1787,6 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh, + + cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); + qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); +- ret = rdma_counter_unbind_qpn(device, port, qpn, cntn); +- if (ret) +- goto err_unbind; +- + if (fill_nldev_handle(msg, device) || + nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || + nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || +@@ -1799,13 +1795,15 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh, + goto err_fill; + } + ++ ret = rdma_counter_unbind_qpn(device, port, qpn, cntn); ++ if (ret) ++ goto err_fill; ++ + nlmsg_end(msg, nlh); + ib_device_put(device); + return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + + err_fill: +- rdma_counter_bind_qpn(device, port, qpn, cntn); +-err_unbind: + nlmsg_free(msg); + err: + ib_device_put(device); +diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c +index a8b9548bd1a2..599340c1f0b8 100644 +--- a/drivers/infiniband/hw/cxgb4/device.c ++++ b/drivers/infiniband/hw/cxgb4/device.c +@@ -242,10 +242,13 @@ static void set_ep_sin6_addrs(struct c4iw_ep *ep, + } + } + +-static int dump_qp(struct c4iw_qp *qp, struct c4iw_debugfs_data *qpd) ++static int dump_qp(unsigned long id, struct c4iw_qp *qp, ++ struct c4iw_debugfs_data *qpd) + { + int space; + int cc; ++ if (id != qp->wq.sq.qid) ++ return 0; + + space = qpd->bufsize - qpd->pos - 1; + if (space == 0) +@@ -350,7 +353,7 @@ static int qp_open(struct inode *inode, struct file *file) + + xa_lock_irq(&qpd->devp->qps); 
+ xa_for_each(&qpd->devp->qps, index, qp) +- dump_qp(qp, qpd); ++ dump_qp(index, qp, qpd); + xa_unlock_irq(&qpd->devp->qps); + + qpd->buf[qpd->pos++] = 0; +diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c +index eb9368be28c1..bbcac539777a 100644 +--- a/drivers/infiniband/hw/cxgb4/qp.c ++++ b/drivers/infiniband/hw/cxgb4/qp.c +@@ -2737,15 +2737,11 @@ int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs, + if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6) + srq->flags = T4_SRQ_LIMIT_SUPPORT; + +- ret = xa_insert_irq(&rhp->qps, srq->wq.qid, srq, GFP_KERNEL); +- if (ret) +- goto err_free_queue; +- + if (udata) { + srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL); + if (!srq_key_mm) { + ret = -ENOMEM; +- goto err_remove_handle; ++ goto err_free_queue; + } + srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL); + if (!srq_db_key_mm) { +@@ -2789,8 +2785,6 @@ err_free_srq_db_key_mm: + kfree(srq_db_key_mm); + err_free_srq_key_mm: + kfree(srq_key_mm); +-err_remove_handle: +- xa_erase_irq(&rhp->qps, srq->wq.qid); + err_free_queue: + free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, + srq->wr_waitp); +@@ -2813,8 +2807,6 @@ void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) + rhp = srq->rhp; + + pr_debug("%s id %d\n", __func__, srq->wq.qid); +- +- xa_erase_irq(&rhp->qps, srq->wq.qid); + ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext, + ibucontext); + free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, +diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c +index 2395fd4233a7..2ed7bfd5feea 100644 +--- a/drivers/infiniband/hw/hfi1/sdma.c ++++ b/drivers/infiniband/hw/hfi1/sdma.c +@@ -1526,8 +1526,11 @@ int sdma_init(struct hfi1_devdata *dd, u8 port) + } + + ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params); +- if (ret < 0) ++ if (ret < 0) { ++ kfree(tmp_sdma_rht); + goto bail; ++ } ++ + dd->sdma_rht = tmp_sdma_rht; + + dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma); +diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c +index 6141f4edc6bf..536d974c78cf 100644 +--- a/drivers/infiniband/hw/hfi1/tid_rdma.c ++++ b/drivers/infiniband/hw/hfi1/tid_rdma.c +@@ -2728,11 +2728,6 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd, + diff = cmp_psn(psn, + flow->flow_state.r_next_psn); + if (diff > 0) { +- if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) +- restart_tid_rdma_read_req(rcd, +- qp, +- wqe); +- + /* Drop the packet.*/ + goto s_unlock; + } else if (diff < 0) { +diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c +index af5bbb35c058..ef7ba0133d28 100644 +--- a/drivers/infiniband/hw/mlx5/devx.c ++++ b/drivers/infiniband/hw/mlx5/devx.c +@@ -1275,29 +1275,6 @@ static int devx_handle_mkey_create(struct mlx5_ib_dev *dev, + return 0; + } + +-static void devx_free_indirect_mkey(struct rcu_head *rcu) +-{ +- kfree(container_of(rcu, struct devx_obj, devx_mr.rcu)); +-} +- +-/* This function to delete from the radix tree needs to be called before +- * destroying the underlying mkey. Otherwise a race might occur in case that +- * other thread will get the same mkey before this one will be deleted, +- * in that case it will fail via inserting to the tree its own data. +- * +- * Note: +- * An error in the destroy is not expected unless there is some other indirect +- * mkey which points to this one. 
In a kernel cleanup flow it will be just +- * destroyed in the iterative destruction call. In a user flow, in case +- * the application didn't close in the expected order it's its own problem, +- * the mkey won't be part of the tree, in both cases the kernel is safe. +- */ +-static void devx_cleanup_mkey(struct devx_obj *obj) +-{ +- xa_erase(&obj->ib_dev->mdev->priv.mkey_table, +- mlx5_base_mkey(obj->devx_mr.mmkey.key)); +-} +- + static void devx_cleanup_subscription(struct mlx5_ib_dev *dev, + struct devx_event_subscription *sub) + { +@@ -1339,8 +1316,16 @@ static int devx_obj_cleanup(struct ib_uobject *uobject, + int ret; + + dev = mlx5_udata_to_mdev(&attrs->driver_udata); +- if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) +- devx_cleanup_mkey(obj); ++ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { ++ /* ++ * The pagefault_single_data_segment() does commands against ++ * the mmkey, we must wait for that to stop before freeing the ++ * mkey, as another allocation could get the same mkey #. ++ */ ++ xa_erase(&obj->ib_dev->mdev->priv.mkey_table, ++ mlx5_base_mkey(obj->devx_mr.mmkey.key)); ++ synchronize_srcu(&dev->mr_srcu); ++ } + + if (obj->flags & DEVX_OBJ_FLAGS_DCT) + ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct); +@@ -1359,12 +1344,6 @@ static int devx_obj_cleanup(struct ib_uobject *uobject, + devx_cleanup_subscription(dev, sub_entry); + mutex_unlock(&devx_event_table->event_xa_lock); + +- if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { +- call_srcu(&dev->mr_srcu, &obj->devx_mr.rcu, +- devx_free_indirect_mkey); +- return ret; +- } +- + kfree(obj); + return ret; + } +@@ -1468,26 +1447,21 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)( + &obj_id); + WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32)); + +- if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { +- err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out); +- if (err) +- goto obj_destroy; +- } +- + err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len); + if (err) +- goto err_copy; ++ goto obj_destroy; + + if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT) + obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type); +- + obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id); + ++ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) { ++ err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out); ++ if (err) ++ goto obj_destroy; ++ } + return 0; + +-err_copy: +- if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) +- devx_cleanup_mkey(obj); + obj_destroy: + if (obj->flags & DEVX_OBJ_FLAGS_DCT) + mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct); +diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h +index 9ae587b74b12..43c7353b9812 100644 +--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h ++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h +@@ -638,7 +638,6 @@ struct mlx5_ib_mw { + struct mlx5_ib_devx_mr { + struct mlx5_core_mkey mmkey; + int ndescs; +- struct rcu_head rcu; + }; + + struct mlx5_ib_umr_context { +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c +index 3401f5f6792e..a6198fe7f376 100644 +--- a/drivers/infiniband/hw/mlx5/mr.c ++++ b/drivers/infiniband/hw/mlx5/mr.c +@@ -1423,6 +1423,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, + if (!mr->umem) + return -EINVAL; + ++ if (is_odp_mr(mr)) ++ return -EOPNOTSUPP; ++ + if (flags & IB_MR_REREG_TRANS) { + addr = virt_addr; + len = length; +@@ -1468,8 +1471,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int 
flags, u64 start, + } + + mr->allocated_from_cache = 0; +- if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) +- mr->live = 1; + } else { + /* + * Send a UMR WQE +@@ -1498,7 +1499,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, + + set_mr_fields(dev, mr, npages, len, access_flags); + +- update_odp_mr(mr); + return 0; + + err: +@@ -1591,13 +1591,14 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) + */ + mr->live = 0; + ++ /* Wait for all running page-fault handlers to finish. */ ++ synchronize_srcu(&dev->mr_srcu); ++ + /* dequeue pending prefetch requests for the mr */ + if (atomic_read(&mr->num_pending_prefetch)) + flush_workqueue(system_unbound_wq); + WARN_ON(atomic_read(&mr->num_pending_prefetch)); + +- /* Wait for all running page-fault handlers to finish. */ +- synchronize_srcu(&dev->mr_srcu); + /* Destroy all page mappings */ + if (umem_odp->page_list) + mlx5_ib_invalidate_range(umem_odp, +@@ -1969,14 +1970,25 @@ free: + + int mlx5_ib_dealloc_mw(struct ib_mw *mw) + { ++ struct mlx5_ib_dev *dev = to_mdev(mw->device); + struct mlx5_ib_mw *mmw = to_mmw(mw); + int err; + +- err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev, +- &mmw->mmkey); +- if (!err) +- kfree(mmw); +- return err; ++ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { ++ xa_erase_irq(&dev->mdev->priv.mkey_table, ++ mlx5_base_mkey(mmw->mmkey.key)); ++ /* ++ * pagefault_single_data_segment() may be accessing mmw under ++ * SRCU if the user bound an ODP MR to this MW. ++ */ ++ synchronize_srcu(&dev->mr_srcu); ++ } ++ ++ err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey); ++ if (err) ++ return err; ++ kfree(mmw); ++ return 0; + } + + int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, +diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c +index 430314c8abd9..52d402f39df9 100644 +--- a/drivers/infiniband/sw/siw/siw_qp.c ++++ b/drivers/infiniband/sw/siw/siw_qp.c +@@ -182,12 +182,19 @@ void siw_qp_llp_close(struct siw_qp *qp) + */ + void siw_qp_llp_write_space(struct sock *sk) + { +- struct siw_cep *cep = sk_to_cep(sk); ++ struct siw_cep *cep; + +- cep->sk_write_space(sk); ++ read_lock(&sk->sk_callback_lock); ++ ++ cep = sk_to_cep(sk); ++ if (cep) { ++ cep->sk_write_space(sk); + +- if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) +- (void)siw_sq_start(cep->qp); ++ if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) ++ (void)siw_sq_start(cep->qp); ++ } ++ ++ read_unlock(&sk->sk_callback_lock); + } + + static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size) +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index c4e0e4a9ee9e..f83a9a302f8e 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -2783,7 +2783,7 @@ static int identity_mapping(struct device *dev) + struct device_domain_info *info; + + info = dev->archdata.iommu; +- if (info && info != DUMMY_DEVICE_DOMAIN_INFO) ++ if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO) + return (info->domain == si_domain); + + return 0; +diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c +index f150f5c5492b..4fb1a40e68a0 100644 +--- a/drivers/md/dm-snap.c ++++ b/drivers/md/dm-snap.c +@@ -18,7 +18,6 @@ + #include + #include + #include +-#include + + #include "dm.h" + +@@ -107,8 +106,8 @@ struct dm_snapshot { + /* The on disk metadata handler */ + struct dm_exception_store *store; + +- /* Maximum number of in-flight COW jobs. 
*/ +- struct semaphore cow_count; ++ unsigned in_progress; ++ struct wait_queue_head in_progress_wait; + + struct dm_kcopyd_client *kcopyd_client; + +@@ -162,8 +161,8 @@ struct dm_snapshot { + */ + #define DEFAULT_COW_THRESHOLD 2048 + +-static int cow_threshold = DEFAULT_COW_THRESHOLD; +-module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644); ++static unsigned cow_threshold = DEFAULT_COW_THRESHOLD; ++module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644); + MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write"); + + DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, +@@ -1327,7 +1326,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) + goto bad_hash_tables; + } + +- sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX); ++ init_waitqueue_head(&s->in_progress_wait); + + s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); + if (IS_ERR(s->kcopyd_client)) { +@@ -1509,9 +1508,56 @@ static void snapshot_dtr(struct dm_target *ti) + + dm_put_device(ti, s->origin); + ++ WARN_ON(s->in_progress); ++ + kfree(s); + } + ++static void account_start_copy(struct dm_snapshot *s) ++{ ++ spin_lock(&s->in_progress_wait.lock); ++ s->in_progress++; ++ spin_unlock(&s->in_progress_wait.lock); ++} ++ ++static void account_end_copy(struct dm_snapshot *s) ++{ ++ spin_lock(&s->in_progress_wait.lock); ++ BUG_ON(!s->in_progress); ++ s->in_progress--; ++ if (likely(s->in_progress <= cow_threshold) && ++ unlikely(waitqueue_active(&s->in_progress_wait))) ++ wake_up_locked(&s->in_progress_wait); ++ spin_unlock(&s->in_progress_wait.lock); ++} ++ ++static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins) ++{ ++ if (unlikely(s->in_progress > cow_threshold)) { ++ spin_lock(&s->in_progress_wait.lock); ++ if (likely(s->in_progress > cow_threshold)) { ++ /* ++ * NOTE: this throttle doesn't account for whether ++ * the caller is servicing an IO that will trigger a COW ++ * so excess throttling may result for chunks not required ++ * to be COW'd. But if cow_threshold was reached, extra ++ * throttling is unlikely to negatively impact performance. ++ */ ++ DECLARE_WAITQUEUE(wait, current); ++ __add_wait_queue(&s->in_progress_wait, &wait); ++ __set_current_state(TASK_UNINTERRUPTIBLE); ++ spin_unlock(&s->in_progress_wait.lock); ++ if (unlock_origins) ++ up_read(&_origins_lock); ++ io_schedule(); ++ remove_wait_queue(&s->in_progress_wait, &wait); ++ return false; ++ } ++ spin_unlock(&s->in_progress_wait.lock); ++ } ++ return true; ++} ++ + /* + * Flush a list of buffers. + */ +@@ -1527,7 +1573,7 @@ static void flush_bios(struct bio *bio) + } + } + +-static int do_origin(struct dm_dev *origin, struct bio *bio); ++static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit); + + /* + * Flush a list of buffers. 
+@@ -1540,7 +1586,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) + while (bio) { + n = bio->bi_next; + bio->bi_next = NULL; +- r = do_origin(s->origin, bio); ++ r = do_origin(s->origin, bio, false); + if (r == DM_MAPIO_REMAPPED) + generic_make_request(bio); + bio = n; +@@ -1732,7 +1778,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context) + rb_link_node(&pe->out_of_order_node, parent, p); + rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree); + } +- up(&s->cow_count); ++ account_end_copy(s); + } + + /* +@@ -1756,7 +1802,7 @@ static void start_copy(struct dm_snap_pending_exception *pe) + dest.count = src.count; + + /* Hand over to kcopyd */ +- down(&s->cow_count); ++ account_start_copy(s); + dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); + } + +@@ -1776,7 +1822,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe, + pe->full_bio = bio; + pe->full_bio_end_io = bio->bi_end_io; + +- down(&s->cow_count); ++ account_start_copy(s); + callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client, + copy_callback, pe); + +@@ -1866,7 +1912,7 @@ static void zero_callback(int read_err, unsigned long write_err, void *context) + struct bio *bio = context; + struct dm_snapshot *s = bio->bi_private; + +- up(&s->cow_count); ++ account_end_copy(s); + bio->bi_status = write_err ? BLK_STS_IOERR : 0; + bio_endio(bio); + } +@@ -1880,7 +1926,7 @@ static void zero_exception(struct dm_snapshot *s, struct dm_exception *e, + dest.sector = bio->bi_iter.bi_sector; + dest.count = s->store->chunk_size; + +- down(&s->cow_count); ++ account_start_copy(s); + WARN_ON_ONCE(bio->bi_private); + bio->bi_private = s; + dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio); +@@ -1916,6 +1962,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) + if (!s->valid) + return DM_MAPIO_KILL; + ++ if (bio_data_dir(bio) == WRITE) { ++ while (unlikely(!wait_for_in_progress(s, false))) ++ ; /* wait_for_in_progress() has slept */ ++ } ++ + down_read(&s->lock); + dm_exception_table_lock(&lock); + +@@ -2112,7 +2163,7 @@ redirect_to_origin: + + if (bio_data_dir(bio) == WRITE) { + up_write(&s->lock); +- return do_origin(s->origin, bio); ++ return do_origin(s->origin, bio, false); + } + + out_unlock: +@@ -2487,15 +2538,24 @@ next_snapshot: + /* + * Called on a write from the origin driver. 
+ */ +-static int do_origin(struct dm_dev *origin, struct bio *bio) ++static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit) + { + struct origin *o; + int r = DM_MAPIO_REMAPPED; + ++again: + down_read(&_origins_lock); + o = __lookup_origin(origin->bdev); +- if (o) ++ if (o) { ++ if (limit) { ++ struct dm_snapshot *s; ++ list_for_each_entry(s, &o->snapshots, list) ++ if (unlikely(!wait_for_in_progress(s, true))) ++ goto again; ++ } ++ + r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); ++ } + up_read(&_origins_lock); + + return r; +@@ -2608,7 +2668,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio) + dm_accept_partial_bio(bio, available_sectors); + + /* Only tell snapshots if this is a write */ +- return do_origin(o->dev, bio); ++ return do_origin(o->dev, bio, true); + } + + /* +diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c +index 98603e235cf0..a76b6c6fd660 100644 +--- a/drivers/misc/fastrpc.c ++++ b/drivers/misc/fastrpc.c +@@ -499,6 +499,7 @@ static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf, + FASTRPC_PHYS(buffer->phys), buffer->size); + if (ret < 0) { + dev_err(buffer->dev, "failed to get scatterlist from DMA API\n"); ++ kfree(a); + return -EINVAL; + } + +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index 931d9d935686..21d8fcc83c9c 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -4039,7 +4039,7 @@ out: + * this to-be-skipped slave to send a packet out. + */ + old_arr = rtnl_dereference(bond->slave_arr); +- for (idx = 0; idx < old_arr->count; idx++) { ++ for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) { + if (skipslave == old_arr->arr[idx]) { + old_arr->arr[idx] = + old_arr->arr[old_arr->count-1]; +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c +index 9231b39d18b2..c501bf2a0252 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c +@@ -112,17 +112,11 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, + u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0}; + u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {0}; + struct xarray *mkeys = &dev->priv.mkey_table; +- struct mlx5_core_mkey *deleted_mkey; + unsigned long flags; + + xa_lock_irqsave(mkeys, flags); +- deleted_mkey = __xa_erase(mkeys, mlx5_base_mkey(mkey->key)); ++ __xa_erase(mkeys, mlx5_base_mkey(mkey->key)); + xa_unlock_irqrestore(mkeys, flags); +- if (!deleted_mkey) { +- mlx5_core_dbg(dev, "failed xarray delete of mkey 0x%x\n", +- mlx5_base_mkey(mkey->key)); +- return -ENOENT; +- } + + MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY); + MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key)); +diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c +index 35f39f23d881..8f8c9ede88c2 100644 +--- a/drivers/net/usb/sr9800.c ++++ b/drivers/net/usb/sr9800.c +@@ -336,7 +336,7 @@ static void sr_set_multicast(struct net_device *net) + static int sr_mdio_read(struct net_device *net, int phy_id, int loc) + { + struct usbnet *dev = netdev_priv(net); +- __le16 res; ++ __le16 res = 0; + + mutex_lock(&dev->phy_mutex); + sr_set_sw_mii(dev); +diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c +index dc45d16e8d21..383d4fa555a8 100644 +--- a/drivers/net/wireless/ath/ath10k/core.c ++++ b/drivers/net/wireless/ath/ath10k/core.c +@@ -2118,12 +2118,15 @@ static int ath10k_init_uart(struct ath10k *ar) + return ret; 
+ } + +- if (!uart_print && ar->hw_params.uart_pin_workaround) { +- ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, +- ar->hw_params.uart_pin); +- if (ret) { +- ath10k_warn(ar, "failed to set UART TX pin: %d", ret); +- return ret; ++ if (!uart_print) { ++ if (ar->hw_params.uart_pin_workaround) { ++ ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, ++ ar->hw_params.uart_pin); ++ if (ret) { ++ ath10k_warn(ar, "failed to set UART TX pin: %d", ++ ret); ++ return ret; ++ } + } + + return 0; +diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c +index 4defb7a0330f..53b66e9434c9 100644 +--- a/drivers/net/wireless/ath/ath6kl/usb.c ++++ b/drivers/net/wireless/ath/ath6kl/usb.c +@@ -132,6 +132,10 @@ ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe) + struct ath6kl_urb_context *urb_context = NULL; + unsigned long flags; + ++ /* bail if this pipe is not initialized */ ++ if (!pipe->ar_usb) ++ return NULL; ++ + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); + if (!list_empty(&pipe->urb_list_head)) { + urb_context = +@@ -150,6 +154,10 @@ static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe, + { + unsigned long flags; + ++ /* bail if this pipe is not initialized */ ++ if (!pipe->ar_usb) ++ return; ++ + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); + pipe->urb_cnt++; + +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +index 8b0b464a1f21..c520f42d165c 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +@@ -887,15 +887,17 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm) + * firmware versions. Unfortunately, we don't have a TLV API + * flag to rely on, so rely on the major version which is in + * the first byte of ucode_ver. This was implemented +- * initially on version 38 and then backported to29 and 17. +- * The intention was to have it in 36 as well, but not all +- * 8000 family got this feature enabled. The 8000 family is +- * the only one using version 36, so skip this version +- * entirely. ++ * initially on version 38 and then backported to 17. It was ++ * also backported to 29, but only for 7265D devices. The ++ * intention was to have it in 36 as well, but not all 8000 ++ * family got this feature enabled. The 8000 family is the ++ * only one using version 36, so skip this version entirely. 
+ */ + return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 || +- IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 || +- IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17; ++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17 || ++ (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 && ++ ((mvm->trans->hw_rev & CSR_HW_REV_TYPE_MSK) == ++ CSR_HW_REV_TYPE_7265D)); + } + + int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) +diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c +index 4055e0ab75ba..05050f6c36db 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/pci.c ++++ b/drivers/net/wireless/realtek/rtlwifi/pci.c +@@ -822,7 +822,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) + hdr = rtl_get_hdr(skb); + fc = rtl_get_fc(skb); + +- if (!stats.crc && !stats.hwerror) { ++ if (!stats.crc && !stats.hwerror && (skb->len > FCS_LEN)) { + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, + sizeof(rx_status)); + +@@ -859,6 +859,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) + _rtl_pci_rx_to_mac80211(hw, skb, rx_status); + } + } else { ++ /* drop packets with errors or those too short */ + dev_kfree_skb_any(skb); + } + new_trx_end: +diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c +index 70f04c2f5b17..fff8dda14023 100644 +--- a/drivers/net/wireless/realtek/rtlwifi/ps.c ++++ b/drivers/net/wireless/realtek/rtlwifi/ps.c +@@ -754,6 +754,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, + return; + } else { + noa_num = (noa_len - 2) / 13; ++ if (noa_num > P2P_MAX_NOA_NUM) ++ noa_num = P2P_MAX_NOA_NUM; ++ + } + noa_index = ie[3]; + if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == +@@ -848,6 +851,9 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data, + return; + } else { + noa_num = (noa_len - 2) / 13; ++ if (noa_num > P2P_MAX_NOA_NUM) ++ noa_num = P2P_MAX_NOA_NUM; ++ + } + noa_index = ie[3]; + if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode == +diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c +index 1172f6c0605b..d61d534396c7 100644 +--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c ++++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c +@@ -997,7 +997,7 @@ static void rtw8822b_do_iqk(struct rtw_dev *rtwdev) + rtw_write_rf(rtwdev, RF_PATH_A, RF_DTXLOK, RFREG_MASK, 0x0); + + reload = !!rtw_read32_mask(rtwdev, REG_IQKFAILMSK, BIT(16)); +- iqk_fail_mask = rtw_read32_mask(rtwdev, REG_IQKFAILMSK, GENMASK(0, 7)); ++ iqk_fail_mask = rtw_read32_mask(rtwdev, REG_IQKFAILMSK, GENMASK(7, 0)); + rtw_dbg(rtwdev, RTW_DBG_PHY, + "iqk counter=%d reload=%d do_iqk_cnt=%d n_iqk_fail(mask)=0x%02x\n", + counter, reload, ++do_iqk_cnt, iqk_fail_mask); +diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c +index c5289eaf17ee..e897e4d768ef 100644 +--- a/drivers/nfc/pn533/usb.c ++++ b/drivers/nfc/pn533/usb.c +@@ -547,18 +547,25 @@ static int pn533_usb_probe(struct usb_interface *interface, + + rc = pn533_finalize_setup(priv); + if (rc) +- goto error; ++ goto err_deregister; + + usb_set_intfdata(interface, phy); + + return 0; + ++err_deregister: ++ pn533_unregister_device(phy->priv); + error: ++ usb_kill_urb(phy->in_urb); ++ usb_kill_urb(phy->out_urb); ++ usb_kill_urb(phy->ack_urb); ++ + usb_free_urb(phy->in_urb); + usb_free_urb(phy->out_urb); + usb_free_urb(phy->ack_urb); + usb_put_dev(phy->udev); + kfree(in_buf); ++ kfree(phy->ack_buffer); + + return rc; + } +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index 
36a5ed1eacbe..3304e2c8a448 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -852,7 +852,7 @@ out: + static int nvme_submit_user_cmd(struct request_queue *q, + struct nvme_command *cmd, void __user *ubuffer, + unsigned bufflen, void __user *meta_buffer, unsigned meta_len, +- u32 meta_seed, u64 *result, unsigned timeout) ++ u32 meta_seed, u32 *result, unsigned timeout) + { + bool write = nvme_is_write(cmd); + struct nvme_ns *ns = q->queuedata; +@@ -893,7 +893,7 @@ static int nvme_submit_user_cmd(struct request_queue *q, + else + ret = nvme_req(req)->status; + if (result) +- *result = le64_to_cpu(nvme_req(req)->result.u64); ++ *result = le32_to_cpu(nvme_req(req)->result.u32); + if (meta && !ret && !write) { + if (copy_to_user(meta_buffer, meta, meta_len)) + ret = -EFAULT; +@@ -1339,54 +1339,6 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, + struct nvme_command c; + unsigned timeout = 0; + u32 effects; +- u64 result; +- int status; +- +- if (!capable(CAP_SYS_ADMIN)) +- return -EACCES; +- if (copy_from_user(&cmd, ucmd, sizeof(cmd))) +- return -EFAULT; +- if (cmd.flags) +- return -EINVAL; +- +- memset(&c, 0, sizeof(c)); +- c.common.opcode = cmd.opcode; +- c.common.flags = cmd.flags; +- c.common.nsid = cpu_to_le32(cmd.nsid); +- c.common.cdw2[0] = cpu_to_le32(cmd.cdw2); +- c.common.cdw2[1] = cpu_to_le32(cmd.cdw3); +- c.common.cdw10 = cpu_to_le32(cmd.cdw10); +- c.common.cdw11 = cpu_to_le32(cmd.cdw11); +- c.common.cdw12 = cpu_to_le32(cmd.cdw12); +- c.common.cdw13 = cpu_to_le32(cmd.cdw13); +- c.common.cdw14 = cpu_to_le32(cmd.cdw14); +- c.common.cdw15 = cpu_to_le32(cmd.cdw15); +- +- if (cmd.timeout_ms) +- timeout = msecs_to_jiffies(cmd.timeout_ms); +- +- effects = nvme_passthru_start(ctrl, ns, cmd.opcode); +- status = nvme_submit_user_cmd(ns ? 
ns->queue : ctrl->admin_q, &c, +- (void __user *)(uintptr_t)cmd.addr, cmd.data_len, +- (void __user *)(uintptr_t)cmd.metadata, +- cmd.metadata_len, 0, &result, timeout); +- nvme_passthru_end(ctrl, effects); +- +- if (status >= 0) { +- if (put_user(result, &ucmd->result)) +- return -EFAULT; +- } +- +- return status; +-} +- +-static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, +- struct nvme_passthru_cmd64 __user *ucmd) +-{ +- struct nvme_passthru_cmd64 cmd; +- struct nvme_command c; +- unsigned timeout = 0; +- u32 effects; + int status; + + if (!capable(CAP_SYS_ADMIN)) +@@ -1457,41 +1409,6 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx) + srcu_read_unlock(&head->srcu, idx); + } + +-static bool is_ctrl_ioctl(unsigned int cmd) +-{ +- if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD) +- return true; +- if (is_sed_ioctl(cmd)) +- return true; +- return false; +-} +- +-static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd, +- void __user *argp, +- struct nvme_ns_head *head, +- int srcu_idx) +-{ +- struct nvme_ctrl *ctrl = ns->ctrl; +- int ret; +- +- nvme_get_ctrl(ns->ctrl); +- nvme_put_ns_from_disk(head, srcu_idx); +- +- switch (cmd) { +- case NVME_IOCTL_ADMIN_CMD: +- ret = nvme_user_cmd(ctrl, NULL, argp); +- break; +- case NVME_IOCTL_ADMIN64_CMD: +- ret = nvme_user_cmd64(ctrl, NULL, argp); +- break; +- default: +- ret = sed_ioctl(ctrl->opal_dev, cmd, argp); +- break; +- } +- nvme_put_ctrl(ctrl); +- return ret; +-} +- + static int nvme_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) + { +@@ -1509,8 +1426,20 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, + * seperately and drop the ns SRCU reference early. This avoids a + * deadlock when deleting namespaces using the passthrough interface. 
+ */ +- if (is_ctrl_ioctl(cmd)) +- return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx); ++ if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) { ++ struct nvme_ctrl *ctrl = ns->ctrl; ++ ++ nvme_get_ctrl(ns->ctrl); ++ nvme_put_ns_from_disk(head, srcu_idx); ++ ++ if (cmd == NVME_IOCTL_ADMIN_CMD) ++ ret = nvme_user_cmd(ctrl, NULL, argp); ++ else ++ ret = sed_ioctl(ctrl->opal_dev, cmd, argp); ++ ++ nvme_put_ctrl(ctrl); ++ return ret; ++ } + + switch (cmd) { + case NVME_IOCTL_ID: +@@ -1523,9 +1452,6 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, + case NVME_IOCTL_SUBMIT_IO: + ret = nvme_submit_io(ns, argp); + break; +- case NVME_IOCTL_IO64_CMD: +- ret = nvme_user_cmd64(ns->ctrl, ns, argp); +- break; + default: + if (ns->ndev) + ret = nvme_nvm_ioctl(ns, cmd, arg); +@@ -2900,8 +2826,6 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd, + switch (cmd) { + case NVME_IOCTL_ADMIN_CMD: + return nvme_user_cmd(ctrl, NULL, argp); +- case NVME_IOCTL_ADMIN64_CMD: +- return nvme_user_cmd64(ctrl, NULL, argp); + case NVME_IOCTL_IO_CMD: + return nvme_dev_user_cmd(ctrl, argp); + case NVME_IOCTL_RESET: +diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h +index ba7d2480613b..dcdaba689b20 100644 +--- a/drivers/s390/cio/cio.h ++++ b/drivers/s390/cio/cio.h +@@ -113,6 +113,7 @@ struct subchannel { + enum sch_todo todo; + struct work_struct todo_work; + struct schib_config config; ++ u64 dma_mask; + char *driver_override; /* Driver name to force a match */ + } __attribute__ ((aligned(8))); + +diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c +index 1fbfb0a93f5f..831850435c23 100644 +--- a/drivers/s390/cio/css.c ++++ b/drivers/s390/cio/css.c +@@ -232,7 +232,12 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid, + * belong to a subchannel need to fit 31 bit width (e.g. ccw). + */ + sch->dev.coherent_dma_mask = DMA_BIT_MASK(31); +- sch->dev.dma_mask = &sch->dev.coherent_dma_mask; ++ /* ++ * But we don't have such restrictions imposed on the stuff that ++ * is handled by the streaming API. 
++ */ ++ sch->dma_mask = DMA_BIT_MASK(64); ++ sch->dev.dma_mask = &sch->dma_mask; + return sch; + + err: +diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c +index c421899be20f..027ef1dde5a7 100644 +--- a/drivers/s390/cio/device.c ++++ b/drivers/s390/cio/device.c +@@ -710,7 +710,7 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch) + if (!cdev->private) + goto err_priv; + cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask; +- cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask; ++ cdev->dev.dma_mask = sch->dev.dma_mask; + dma_pool = cio_gp_dma_create(&cdev->dev, 1); + if (!dma_pool) + goto err_dma_pool; +diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c +index 6b7b390b2e52..9584c5a48397 100644 +--- a/drivers/scsi/qla2xxx/qla_attr.c ++++ b/drivers/scsi/qla2xxx/qla_attr.c +@@ -441,9 +441,6 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, + valid = 0; + if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) + valid = 1; +- else if (start == (ha->flt_region_boot * 4) || +- start == (ha->flt_region_fw * 4)) +- valid = 1; + else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) + valid = 1; + if (!valid) { +@@ -491,8 +488,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, + "Writing flash region -- 0x%x/0x%x.\n", + ha->optrom_region_start, ha->optrom_region_size); + +- ha->isp_ops->write_optrom(vha, ha->optrom_buffer, ++ rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer, + ha->optrom_region_start, ha->optrom_region_size); ++ if (rval) ++ rval = -EIO; + break; + default: + rval = -EINVAL; +diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +index 664d93a7f90d..4fac9dca798e 100644 +--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c ++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c +@@ -348,8 +348,10 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj, + } + + padapter->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL); +- if (!padapter->HalData) +- DBG_88E("cant not alloc memory for HAL DATA\n"); ++ if (!padapter->HalData) { ++ DBG_88E("Failed to allocate memory for HAL data\n"); ++ goto free_adapter; ++ } + + /* step read_chip_version */ + rtw_hal_read_chip_version(padapter); +diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c +index c70caf4ea490..a2b5c796bbc4 100644 +--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c ++++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c +@@ -1831,7 +1831,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) + + while (credits) { + struct sk_buff *p = cxgbit_sock_peek_wr(csk); +- const u32 csum = (__force u32)p->csum; ++ u32 csum; + + if (unlikely(!p)) { + pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n", +@@ -1840,6 +1840,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb) + break; + } + ++ csum = (__force u32)p->csum; + if (unlikely(credits < csum)) { + pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n", + csk, csk->tid, +diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c +index 27fbe62c7ddd..9c782706e652 100644 +--- a/drivers/thunderbolt/nhi.c ++++ b/drivers/thunderbolt/nhi.c +@@ -143,9 +143,20 @@ static void __iomem *ring_options_base(struct tb_ring *ring) + return io; + } + +-static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset) ++static void ring_iowrite_cons(struct tb_ring *ring, u16 cons) + { +- iowrite16(value, ring_desc_base(ring) + offset); 
++ /* ++ * The other 16-bits in the register is read-only and writes to it ++ * are ignored by the hardware so we can save one ioread32() by ++ * filling the read-only bits with zeroes. ++ */ ++ iowrite32(cons, ring_desc_base(ring) + 8); ++} ++ ++static void ring_iowrite_prod(struct tb_ring *ring, u16 prod) ++{ ++ /* See ring_iowrite_cons() above for explanation */ ++ iowrite32(prod << 16, ring_desc_base(ring) + 8); + } + + static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset) +@@ -197,7 +208,10 @@ static void ring_write_descriptors(struct tb_ring *ring) + descriptor->sof = frame->sof; + } + ring->head = (ring->head + 1) % ring->size; +- ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8); ++ if (ring->is_tx) ++ ring_iowrite_prod(ring, ring->head); ++ else ++ ring_iowrite_cons(ring, ring->head); + } + } + +@@ -662,7 +676,7 @@ void tb_ring_stop(struct tb_ring *ring) + + ring_iowrite32options(ring, 0, 0); + ring_iowrite64desc(ring, 0, 0); +- ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8); ++ ring_iowrite32desc(ring, 0, 8); + ring_iowrite32desc(ring, 0, 12); + ring->head = 0; + ring->tail = 0; +diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c +index 31d0234837e4..5a99234826e7 100644 +--- a/drivers/thunderbolt/tunnel.c ++++ b/drivers/thunderbolt/tunnel.c +@@ -211,7 +211,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, + return NULL; + } + tb_pci_init_path(path); +- tunnel->paths[TB_PCI_PATH_UP] = path; ++ tunnel->paths[TB_PCI_PATH_DOWN] = path; + + path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0, + "PCIe Up"); +@@ -220,7 +220,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, + return NULL; + } + tb_pci_init_path(path); +- tunnel->paths[TB_PCI_PATH_DOWN] = path; ++ tunnel->paths[TB_PCI_PATH_UP] = path; + + return tunnel; + } +diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c +index e55c79eb6430..98361acd3053 100644 +--- a/drivers/tty/n_hdlc.c ++++ b/drivers/tty/n_hdlc.c +@@ -968,6 +968,11 @@ static int __init n_hdlc_init(void) + + } /* end of init_module() */ + ++#ifdef CONFIG_SPARC ++#undef __exitdata ++#define __exitdata ++#endif ++ + static const char hdlc_unregister_ok[] __exitdata = + KERN_INFO "N_HDLC: line discipline unregistered\n"; + static const char hdlc_unregister_fail[] __exitdata = +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c +index 3ef65cbd2478..e4b08077f875 100644 +--- a/drivers/tty/serial/8250/8250_omap.c ++++ b/drivers/tty/serial/8250/8250_omap.c +@@ -141,7 +141,7 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) + + serial8250_do_set_mctrl(port, mctrl); + +- if (!up->gpios) { ++ if (!mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS)) { + /* + * Turn off autoRTS if RTS is lowered and restore autoRTS + * setting if RTS is raised +@@ -456,7 +456,8 @@ static void omap_8250_set_termios(struct uart_port *port, + up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF); + + if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW && +- !up->gpios) { ++ !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS) && ++ !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_CTS)) { + /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */ + up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; + priv->efr |= UART_EFR_CTS; +diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig +index 3083dbae35f7..3b436ccd29da 100644 +--- a/drivers/tty/serial/Kconfig ++++ 
b/drivers/tty/serial/Kconfig +@@ -1075,6 +1075,7 @@ config SERIAL_SIFIVE_CONSOLE + bool "Console on SiFive UART" + depends on SERIAL_SIFIVE=y + select SERIAL_CORE_CONSOLE ++ select SERIAL_EARLYCON + help + Select this option if you would like to use a SiFive UART as the + system console. +diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c +index 29a6dc6a8d23..73fcc6bdb031 100644 +--- a/drivers/tty/serial/owl-uart.c ++++ b/drivers/tty/serial/owl-uart.c +@@ -742,7 +742,7 @@ static int __init owl_uart_init(void) + return ret; + } + +-static void __init owl_uart_exit(void) ++static void __exit owl_uart_exit(void) + { + platform_driver_unregister(&owl_uart_platform_driver); + uart_unregister_driver(&owl_uart_driver); +diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c +index 284623eefaeb..ba5e488a0374 100644 +--- a/drivers/tty/serial/rda-uart.c ++++ b/drivers/tty/serial/rda-uart.c +@@ -817,7 +817,7 @@ static int __init rda_uart_init(void) + return ret; + } + +-static void __init rda_uart_exit(void) ++static void __exit rda_uart_exit(void) + { + platform_driver_unregister(&rda_uart_platform_driver); + uart_unregister_driver(&rda_uart_driver); +diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c +index 2b400189be91..54c43e02e375 100644 +--- a/drivers/tty/serial/serial_mctrl_gpio.c ++++ b/drivers/tty/serial/serial_mctrl_gpio.c +@@ -61,6 +61,9 @@ EXPORT_SYMBOL_GPL(mctrl_gpio_set); + struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios, + enum mctrl_gpio_idx gidx) + { ++ if (gpios == NULL) ++ return NULL; ++ + return gpios->gpio[gidx]; + } + EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod); +diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c +index 7cf34beb50df..4d8f8f4ecf98 100644 +--- a/drivers/usb/gadget/udc/core.c ++++ b/drivers/usb/gadget/udc/core.c +@@ -98,6 +98,17 @@ int usb_ep_enable(struct usb_ep *ep) + if (ep->enabled) + goto out; + ++ /* UDC drivers can't handle endpoints with maxpacket size 0 */ ++ if (usb_endpoint_maxp(ep->desc) == 0) { ++ /* ++ * We should log an error message here, but we can't call ++ * dev_err() because there's no way to find the gadget ++ * given only ep. 
++ */ ++ ret = -EINVAL; ++ goto out; ++ } ++ + ret = ep->ops->enable(ep, ep->desc); + if (ret) + goto out; +diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c +index 7ba6afc7ef23..76c3f29562d2 100644 +--- a/drivers/usb/host/xhci-debugfs.c ++++ b/drivers/usb/host/xhci-debugfs.c +@@ -202,10 +202,10 @@ static void xhci_ring_dump_segment(struct seq_file *s, + trb = &seg->trbs[i]; + dma = seg->dma + i * sizeof(*trb); + seq_printf(s, "%pad: %s\n", &dma, +- xhci_decode_trb(trb->generic.field[0], +- trb->generic.field[1], +- trb->generic.field[2], +- trb->generic.field[3])); ++ xhci_decode_trb(le32_to_cpu(trb->generic.field[0]), ++ le32_to_cpu(trb->generic.field[1]), ++ le32_to_cpu(trb->generic.field[2]), ++ le32_to_cpu(trb->generic.field[3]))); + } + } + +@@ -263,10 +263,10 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused) + xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus)); + slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); + seq_printf(s, "%pad: %s\n", &dev->out_ctx->dma, +- xhci_decode_slot_context(slot_ctx->dev_info, +- slot_ctx->dev_info2, +- slot_ctx->tt_info, +- slot_ctx->dev_state)); ++ xhci_decode_slot_context(le32_to_cpu(slot_ctx->dev_info), ++ le32_to_cpu(slot_ctx->dev_info2), ++ le32_to_cpu(slot_ctx->tt_info), ++ le32_to_cpu(slot_ctx->dev_state))); + + return 0; + } +@@ -286,10 +286,10 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused) + ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci); + dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params); + seq_printf(s, "%pad: %s\n", &dma, +- xhci_decode_ep_context(ep_ctx->ep_info, +- ep_ctx->ep_info2, +- ep_ctx->deq, +- ep_ctx->tx_info)); ++ xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info), ++ le32_to_cpu(ep_ctx->ep_info2), ++ le64_to_cpu(ep_ctx->deq), ++ le32_to_cpu(ep_ctx->tx_info))); + } + + return 0; +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 85ceb43e3405..e7aab31fd9a5 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -3330,6 +3330,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + if (xhci_urb_suitable_for_idt(urb)) { + memcpy(&send_addr, urb->transfer_buffer, + trb_buff_len); ++ le64_to_cpus(&send_addr); + field |= TRB_IDT; + } + } +@@ -3475,6 +3476,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, + if (xhci_urb_suitable_for_idt(urb)) { + memcpy(&addr, urb->transfer_buffer, + urb->transfer_buffer_length); ++ le64_to_cpus(&addr); + field |= TRB_IDT; + } else { + addr = (u64) urb->transfer_dma; +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index ee9d2e0fc53a..270e45058272 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -3071,6 +3071,48 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index, + } + } + ++static void xhci_endpoint_disable(struct usb_hcd *hcd, ++ struct usb_host_endpoint *host_ep) ++{ ++ struct xhci_hcd *xhci; ++ struct xhci_virt_device *vdev; ++ struct xhci_virt_ep *ep; ++ struct usb_device *udev; ++ unsigned long flags; ++ unsigned int ep_index; ++ ++ xhci = hcd_to_xhci(hcd); ++rescan: ++ spin_lock_irqsave(&xhci->lock, flags); ++ ++ udev = (struct usb_device *)host_ep->hcpriv; ++ if (!udev || !udev->slot_id) ++ goto done; ++ ++ vdev = xhci->devs[udev->slot_id]; ++ if (!vdev) ++ goto done; ++ ++ ep_index = xhci_get_endpoint_index(&host_ep->desc); ++ ep = &vdev->eps[ep_index]; ++ if (!ep) ++ goto done; ++ ++ /* wait for hub_tt_work to finish clearing hub TT */ ++ if 
(ep->ep_state & EP_CLEARING_TT) { ++ spin_unlock_irqrestore(&xhci->lock, flags); ++ schedule_timeout_uninterruptible(1); ++ goto rescan; ++ } ++ ++ if (ep->ep_state) ++ xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n", ++ ep->ep_state); ++done: ++ host_ep->hcpriv = NULL; ++ spin_unlock_irqrestore(&xhci->lock, flags); ++} ++ + /* + * Called after usb core issues a clear halt control message. + * The host side of the halt should already be cleared by a reset endpoint +@@ -5237,20 +5279,13 @@ static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd, + unsigned int ep_index; + unsigned long flags; + +- /* +- * udev might be NULL if tt buffer is cleared during a failed device +- * enumeration due to a halted control endpoint. Usb core might +- * have allocated a new udev for the next enumeration attempt. +- */ +- + xhci = hcd_to_xhci(hcd); ++ ++ spin_lock_irqsave(&xhci->lock, flags); + udev = (struct usb_device *)ep->hcpriv; +- if (!udev) +- return; + slot_id = udev->slot_id; + ep_index = xhci_get_endpoint_index(&ep->desc); + +- spin_lock_irqsave(&xhci->lock, flags); + xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT; + xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + spin_unlock_irqrestore(&xhci->lock, flags); +@@ -5287,6 +5322,7 @@ static const struct hc_driver xhci_hc_driver = { + .free_streams = xhci_free_streams, + .add_endpoint = xhci_add_endpoint, + .drop_endpoint = xhci_drop_endpoint, ++ .endpoint_disable = xhci_endpoint_disable, + .endpoint_reset = xhci_endpoint_reset, + .check_bandwidth = xhci_check_bandwidth, + .reset_bandwidth = xhci_reset_bandwidth, +diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c +index 15b5f06fb0b3..f5e34c503454 100644 +--- a/drivers/usb/misc/ldusb.c ++++ b/drivers/usb/misc/ldusb.c +@@ -495,11 +495,11 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count, + retval = -EFAULT; + goto unlock_exit; + } +- dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size; +- + retval = bytes_to_read; + + spin_lock_irq(&dev->rbsl); ++ dev->ring_tail = (dev->ring_tail + 1) % ring_buffer_size; ++ + if (dev->buffer_overflow) { + dev->buffer_overflow = 0; + spin_unlock_irq(&dev->rbsl); +@@ -580,7 +580,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer, + 1 << 8, 0, + dev->interrupt_out_buffer, + bytes_to_write, +- USB_CTRL_SET_TIMEOUT * HZ); ++ USB_CTRL_SET_TIMEOUT); + if (retval < 0) + dev_err(&dev->intf->dev, + "Couldn't submit HID_REQ_SET_REPORT %d\n", +diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c +index 62dab2441ec4..23061f1526b4 100644 +--- a/drivers/usb/misc/legousbtower.c ++++ b/drivers/usb/misc/legousbtower.c +@@ -878,7 +878,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device + get_version_reply, + sizeof(*get_version_reply), + 1000); +- if (result < sizeof(*get_version_reply)) { ++ if (result != sizeof(*get_version_reply)) { + if (result >= 0) + result = -EIO; + dev_err(idev, "get version request failed: %d\n", result); +diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c +index 79314d8c94a4..ca3bd58f2025 100644 +--- a/drivers/usb/serial/whiteheat.c ++++ b/drivers/usb/serial/whiteheat.c +@@ -559,6 +559,10 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command, + + command_port = port->serial->port[COMMAND_PORT]; + command_info = usb_get_serial_port_data(command_port); ++ ++ if (command_port->bulk_out_size < datasize + 1) ++ return -EIO; ++ + 
mutex_lock(&command_info->mutex); + command_info->command_finished = false; + +@@ -632,6 +636,7 @@ static void firm_setup_port(struct tty_struct *tty) + struct device *dev = &port->dev; + struct whiteheat_port_settings port_settings; + unsigned int cflag = tty->termios.c_cflag; ++ speed_t baud; + + port_settings.port = port->port_number + 1; + +@@ -692,11 +697,13 @@ static void firm_setup_port(struct tty_struct *tty) + dev_dbg(dev, "%s - XON = %2x, XOFF = %2x\n", __func__, port_settings.xon, port_settings.xoff); + + /* get the baud rate wanted */ +- port_settings.baud = tty_get_baud_rate(tty); +- dev_dbg(dev, "%s - baud rate = %d\n", __func__, port_settings.baud); ++ baud = tty_get_baud_rate(tty); ++ port_settings.baud = cpu_to_le32(baud); ++ dev_dbg(dev, "%s - baud rate = %u\n", __func__, baud); + + /* fixme: should set validated settings */ +- tty_encode_baud_rate(tty, port_settings.baud, port_settings.baud); ++ tty_encode_baud_rate(tty, baud, baud); ++ + /* handle any settings that aren't specified in the tty structure */ + port_settings.lloop = 0; + +diff --git a/drivers/usb/serial/whiteheat.h b/drivers/usb/serial/whiteheat.h +index 00398149cd8d..269e727a92f9 100644 +--- a/drivers/usb/serial/whiteheat.h ++++ b/drivers/usb/serial/whiteheat.h +@@ -87,7 +87,7 @@ struct whiteheat_simple { + + struct whiteheat_port_settings { + __u8 port; /* port number (1 to N) */ +- __u32 baud; /* any value 7 - 460800, firmware calculates ++ __le32 baud; /* any value 7 - 460800, firmware calculates + best fit; arrives little endian */ + __u8 bits; /* 5, 6, 7, or 8 */ + __u8 stop; /* 1 or 2, default 1 (2 = 1.5 if bits = 5) */ +diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c +index 05b80211290d..f3c4caf64051 100644 +--- a/drivers/usb/storage/scsiglue.c ++++ b/drivers/usb/storage/scsiglue.c +@@ -67,7 +67,6 @@ static const char* host_info(struct Scsi_Host *host) + static int slave_alloc (struct scsi_device *sdev) + { + struct us_data *us = host_to_us(sdev->host); +- int maxp; + + /* + * Set the INQUIRY transfer length to 36. We don't use any of +@@ -76,15 +75,6 @@ static int slave_alloc (struct scsi_device *sdev) + */ + sdev->inquiry_len = 36; + +- /* +- * USB has unusual scatter-gather requirements: the length of each +- * scatterlist element except the last must be divisible by the +- * Bulk maxpacket value. Fortunately this value is always a +- * power of 2. Inform the block layer about this requirement. +- */ +- maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0); +- blk_queue_virt_boundary(sdev->request_queue, maxp - 1); +- + /* + * Some host controllers may have alignment requirements. + * We'll play it safe by requiring 512-byte alignment always. +diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c +index 047c5922618f..0d044d59317e 100644 +--- a/drivers/usb/storage/uas.c ++++ b/drivers/usb/storage/uas.c +@@ -789,29 +789,9 @@ static int uas_slave_alloc(struct scsi_device *sdev) + { + struct uas_dev_info *devinfo = + (struct uas_dev_info *)sdev->host->hostdata; +- int maxp; + + sdev->hostdata = devinfo; + +- /* +- * We have two requirements here. We must satisfy the requirements +- * of the physical HC and the demands of the protocol, as we +- * definitely want no additional memory allocation in this path +- * ruling out using bounce buffers. +- * +- * For a transmission on USB to continue we must never send +- * a package that is smaller than maxpacket. 
Hence the length of each +- * scatterlist element except the last must be divisible by the +- * Bulk maxpacket value. +- * If the HC does not ensure that through SG, +- * the upper layer must do that. We must assume nothing +- * about the capabilities off the HC, so we use the most +- * pessimistic requirement. +- */ +- +- maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0); +- blk_queue_virt_boundary(sdev->request_queue, maxp - 1); +- + /* + * The protocol has no requirements on alignment in the strict sense. + * Controllers may or may not have alignment restrictions. +diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c +index 75fd140b02ff..43c391626a00 100644 +--- a/drivers/virt/vboxguest/vboxguest_utils.c ++++ b/drivers/virt/vboxguest/vboxguest_utils.c +@@ -220,6 +220,8 @@ static int hgcm_call_preprocess_linaddr( + if (!bounce_buf) + return -ENOMEM; + ++ *bounce_buf_ret = bounce_buf; ++ + if (copy_in) { + ret = copy_from_user(bounce_buf, (void __user *)buf, len); + if (ret) +@@ -228,7 +230,6 @@ static int hgcm_call_preprocess_linaddr( + memset(bounce_buf, 0, len); + } + +- *bounce_buf_ret = bounce_buf; + hgcm_call_add_pagelist_size(bounce_buf, len, extra); + return 0; + } +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c +index bdc08244a648..a8041e451e9e 100644 +--- a/drivers/virtio/virtio_ring.c ++++ b/drivers/virtio/virtio_ring.c +@@ -1499,9 +1499,6 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) + * counter first before updating event flags. + */ + virtio_wmb(vq->weak_barriers); +- } else { +- used_idx = vq->last_used_idx; +- wrap_counter = vq->packed.used_wrap_counter; + } + + if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) { +@@ -1518,7 +1515,9 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq) + */ + virtio_mb(vq->weak_barriers); + +- if (is_used_desc_packed(vq, used_idx, wrap_counter)) { ++ if (is_used_desc_packed(vq, ++ vq->last_used_idx, ++ vq->packed.used_wrap_counter)) { + END_USE(vq); + return false; + } +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index e7a1ec075c65..180749080fd8 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -2758,8 +2758,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, + int nitems, bool use_global_rsv); + void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info, + struct btrfs_block_rsv *rsv); +-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes, +- bool qgroup_free); ++void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes); + + int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes); + int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache); +diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c +index 934521fe7e71..8d2bb28ff5e0 100644 +--- a/fs/btrfs/delalloc-space.c ++++ b/fs/btrfs/delalloc-space.c +@@ -407,7 +407,6 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes, + * btrfs_delalloc_release_extents - release our outstanding_extents + * @inode: the inode to balance the reservation for. + * @num_bytes: the number of bytes we originally reserved with +- * @qgroup_free: do we need to free qgroup meta reservation or convert them. + * + * When we reserve space we increase outstanding_extents for the extents we may + * add. 
Once we've set the range as delalloc or created our ordered extents we +@@ -415,8 +414,7 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes, + * temporarily tracked outstanding_extents. This _must_ be used in conjunction + * with btrfs_delalloc_reserve_metadata. + */ +-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes, +- bool qgroup_free) ++void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes) + { + struct btrfs_fs_info *fs_info = inode->root->fs_info; + unsigned num_extents; +@@ -430,7 +428,7 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes, + if (btrfs_is_testing(fs_info)) + return; + +- btrfs_inode_rsv_release(inode, qgroup_free); ++ btrfs_inode_rsv_release(inode, true); + } + + /** +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c +index d68add0bf346..a8a2adaf222f 100644 +--- a/fs/btrfs/file.c ++++ b/fs/btrfs/file.c +@@ -1692,7 +1692,7 @@ again: + force_page_uptodate); + if (ret) { + btrfs_delalloc_release_extents(BTRFS_I(inode), +- reserve_bytes, true); ++ reserve_bytes); + break; + } + +@@ -1704,7 +1704,7 @@ again: + if (extents_locked == -EAGAIN) + goto again; + btrfs_delalloc_release_extents(BTRFS_I(inode), +- reserve_bytes, true); ++ reserve_bytes); + ret = extents_locked; + break; + } +@@ -1772,8 +1772,7 @@ again: + else + free_extent_state(cached_state); + +- btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes, +- true); ++ btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes); + if (ret) { + btrfs_drop_pages(pages, num_pages); + break; +diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c +index 2e8bb402050b..e2f49615c429 100644 +--- a/fs/btrfs/inode-map.c ++++ b/fs/btrfs/inode-map.c +@@ -484,12 +484,13 @@ again: + ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc, + prealloc, prealloc, &alloc_hint); + if (ret) { +- btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, true); ++ btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc); ++ btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc, true); + goto out_put; + } + + ret = btrfs_write_out_ino_cache(root, trans, path, inode); +- btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, false); ++ btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc); + out_put: + iput(inode); + out_release: +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index b3453adb214d..1b85278471f6 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -2167,7 +2167,7 @@ again: + + ClearPageChecked(page); + set_page_dirty(page); +- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, false); ++ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); + out: + unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, + &cached_state); +@@ -4912,7 +4912,7 @@ again: + if (!page) { + btrfs_delalloc_release_space(inode, data_reserved, + block_start, blocksize, true); +- btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, true); ++ btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize); + ret = -ENOMEM; + goto out; + } +@@ -4980,7 +4980,7 @@ out_unlock: + if (ret) + btrfs_delalloc_release_space(inode, data_reserved, block_start, + blocksize, true); +- btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, (ret != 0)); ++ btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize); + unlock_page(page); + put_page(page); + out: +@@ -8685,7 +8685,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) + } else if (ret >= 0 && (size_t)ret < count) + 
btrfs_delalloc_release_space(inode, data_reserved, + offset, count - (size_t)ret, true); +- btrfs_delalloc_release_extents(BTRFS_I(inode), count, false); ++ btrfs_delalloc_release_extents(BTRFS_I(inode), count); + } + out: + if (wakeup) +@@ -9038,7 +9038,7 @@ again: + unlock_extent_cached(io_tree, page_start, page_end, &cached_state); + + if (!ret2) { +- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true); ++ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); + sb_end_pagefault(inode->i_sb); + extent_changeset_free(data_reserved); + return VM_FAULT_LOCKED; +@@ -9047,7 +9047,7 @@ again: + out_unlock: + unlock_page(page); + out: +- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0)); ++ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); + btrfs_delalloc_release_space(inode, data_reserved, page_start, + reserved_space, (ret != 0)); + out_noreserve: +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index 818f7ec8bb0e..8dad66df74ed 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -1360,8 +1360,7 @@ again: + unlock_page(pages[i]); + put_page(pages[i]); + } +- btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT, +- false); ++ btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT); + extent_changeset_free(data_reserved); + return i_done; + out: +@@ -1372,8 +1371,7 @@ out: + btrfs_delalloc_release_space(inode, data_reserved, + start_index << PAGE_SHIFT, + page_cnt << PAGE_SHIFT, true); +- btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT, +- true); ++ btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT); + extent_changeset_free(data_reserved); + return ret; + +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c +index 074947bebd16..572314aebdf1 100644 +--- a/fs/btrfs/relocation.c ++++ b/fs/btrfs/relocation.c +@@ -3277,7 +3277,7 @@ static int relocate_file_extent_cluster(struct inode *inode, + btrfs_delalloc_release_metadata(BTRFS_I(inode), + PAGE_SIZE, true); + btrfs_delalloc_release_extents(BTRFS_I(inode), +- PAGE_SIZE, true); ++ PAGE_SIZE); + ret = -ENOMEM; + goto out; + } +@@ -3298,7 +3298,7 @@ static int relocate_file_extent_cluster(struct inode *inode, + btrfs_delalloc_release_metadata(BTRFS_I(inode), + PAGE_SIZE, true); + btrfs_delalloc_release_extents(BTRFS_I(inode), +- PAGE_SIZE, true); ++ PAGE_SIZE); + ret = -EIO; + goto out; + } +@@ -3327,7 +3327,7 @@ static int relocate_file_extent_cluster(struct inode *inode, + btrfs_delalloc_release_metadata(BTRFS_I(inode), + PAGE_SIZE, true); + btrfs_delalloc_release_extents(BTRFS_I(inode), +- PAGE_SIZE, true); ++ PAGE_SIZE); + + clear_extent_bits(&BTRFS_I(inode)->io_tree, + page_start, page_end, +@@ -3343,8 +3343,7 @@ static int relocate_file_extent_cluster(struct inode *inode, + put_page(page); + + index++; +- btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, +- false); ++ btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE); + balance_dirty_pages_ratelimited(inode->i_mapping); + btrfs_throttle(fs_info); + } +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index c3c0c064c25d..91c702b4cae9 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -5070,7 +5070,7 @@ static int clone_range(struct send_ctx *sctx, + struct btrfs_path *path; + struct btrfs_key key; + int ret; +- u64 clone_src_i_size; ++ u64 clone_src_i_size = 0; + + /* + * Prevent cloning from a zero offset with a length matching the sector +diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c +index ed92958e842d..657f409d4de0 100644 +--- 
a/fs/cifs/netmisc.c ++++ b/fs/cifs/netmisc.c +@@ -117,10 +117,6 @@ static const struct smb_to_posix_error mapping_table_ERRSRV[] = { + {0, 0} + }; + +-static const struct smb_to_posix_error mapping_table_ERRHRD[] = { +- {0, 0} +-}; +- + /* + * Convert a string containing text IPv4 or IPv6 address to binary form. + * +diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c +index dd0f64f7bc06..0c4b6a41e385 100644 +--- a/fs/fuse/dir.c ++++ b/fs/fuse/dir.c +@@ -1476,6 +1476,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, + is_truncate = true; + } + ++ /* Flush dirty data/metadata before non-truncate SETATTR */ ++ if (is_wb && S_ISREG(inode->i_mode) && ++ attr->ia_valid & ++ (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET | ++ ATTR_TIMES_SET)) { ++ err = write_inode_now(inode, true); ++ if (err) ++ return err; ++ ++ fuse_set_nowrite(inode); ++ fuse_release_nowrite(inode); ++ } ++ + if (is_truncate) { + fuse_set_nowrite(inode); + set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index 91c99724dee0..d199dc0fbac1 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -201,7 +201,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) + { + struct fuse_conn *fc = get_fuse_conn(inode); + int err; +- bool lock_inode = (file->f_flags & O_TRUNC) && ++ bool is_wb_truncate = (file->f_flags & O_TRUNC) && + fc->atomic_o_trunc && + fc->writeback_cache; + +@@ -209,16 +209,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) + if (err) + return err; + +- if (lock_inode) ++ if (is_wb_truncate) { + inode_lock(inode); ++ fuse_set_nowrite(inode); ++ } + + err = fuse_do_open(fc, get_node_id(inode), file, isdir); + + if (!err) + fuse_finish_open(inode, file); + +- if (lock_inode) ++ if (is_wb_truncate) { ++ fuse_release_nowrite(inode); + inode_unlock(inode); ++ } + + return err; + } +diff --git a/fs/io_uring.c b/fs/io_uring.c +index ed223c33dd89..37da4ea68f50 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -338,6 +338,8 @@ struct io_kiocb { + #define REQ_F_LINK 64 /* linked sqes */ + #define REQ_F_LINK_DONE 128 /* linked sqes done */ + #define REQ_F_FAIL_LINK 256 /* fail rest of links */ ++#define REQ_F_ISREG 2048 /* regular file */ ++#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */ + u64 user_data; + u32 result; + u32 sequence; +@@ -885,26 +887,26 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, + return ret; + } + +-static void kiocb_end_write(struct kiocb *kiocb) ++static void kiocb_end_write(struct io_kiocb *req) + { +- if (kiocb->ki_flags & IOCB_WRITE) { +- struct inode *inode = file_inode(kiocb->ki_filp); ++ /* ++ * Tell lockdep we inherited freeze protection from submission ++ * thread. ++ */ ++ if (req->flags & REQ_F_ISREG) { ++ struct inode *inode = file_inode(req->file); + +- /* +- * Tell lockdep we inherited freeze protection from submission +- * thread. 
+- */ +- if (S_ISREG(inode->i_mode)) +- __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE); +- file_end_write(kiocb->ki_filp); ++ __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE); + } ++ file_end_write(req->file); + } + + static void io_complete_rw(struct kiocb *kiocb, long res, long res2) + { + struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); + +- kiocb_end_write(kiocb); ++ if (kiocb->ki_flags & IOCB_WRITE) ++ kiocb_end_write(req); + + if ((req->flags & REQ_F_LINK) && res != req->result) + req->flags |= REQ_F_FAIL_LINK; +@@ -916,7 +918,8 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) + { + struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw); + +- kiocb_end_write(kiocb); ++ if (kiocb->ki_flags & IOCB_WRITE) ++ kiocb_end_write(req); + + if ((req->flags & REQ_F_LINK) && res != req->result) + req->flags |= REQ_F_FAIL_LINK; +@@ -1030,8 +1033,17 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s, + if (!req->file) + return -EBADF; + +- if (force_nonblock && !io_file_supports_async(req->file)) +- force_nonblock = false; ++ if (S_ISREG(file_inode(req->file)->i_mode)) ++ req->flags |= REQ_F_ISREG; ++ ++ /* ++ * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so ++ * we know to async punt it even if it was opened O_NONBLOCK ++ */ ++ if (force_nonblock && !io_file_supports_async(req->file)) { ++ req->flags |= REQ_F_MUST_PUNT; ++ return -EAGAIN; ++ } + + kiocb->ki_pos = READ_ONCE(sqe->off); + kiocb->ki_flags = iocb_flags(kiocb->ki_filp); +@@ -1052,7 +1064,8 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s, + return ret; + + /* don't allow async punt if RWF_NOWAIT was requested */ +- if (kiocb->ki_flags & IOCB_NOWAIT) ++ if ((kiocb->ki_flags & IOCB_NOWAIT) || ++ (req->file->f_flags & O_NONBLOCK)) + req->flags |= REQ_F_NOWAIT; + + if (force_nonblock) +@@ -1065,6 +1078,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s, + + kiocb->ki_flags |= IOCB_HIPRI; + kiocb->ki_complete = io_complete_rw_iopoll; ++ req->result = 0; + } else { + if (kiocb->ki_flags & IOCB_HIPRI) + return -EINVAL; +@@ -1286,7 +1300,9 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s, + * need async punt anyway, so it's more efficient to do it + * here. + */ +- if (force_nonblock && ret2 > 0 && ret2 < read_size) ++ if (force_nonblock && !(req->flags & REQ_F_NOWAIT) && ++ (req->flags & REQ_F_ISREG) && ++ ret2 > 0 && ret2 < read_size) + ret2 = -EAGAIN; + /* Catch -EAGAIN return for forced non-blocking submission */ + if (!force_nonblock || ret2 != -EAGAIN) { +@@ -1353,7 +1369,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, + * released so that it doesn't complain about the held lock when + * we return to userspace. 
+ */ +- if (S_ISREG(file_inode(file)->i_mode)) { ++ if (req->flags & REQ_F_ISREG) { + __sb_start_write(file_inode(file)->i_sb, + SB_FREEZE_WRITE, true); + __sb_writers_release(file_inode(file)->i_sb, +@@ -2096,7 +2112,13 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, + } + + ret = __io_submit_sqe(ctx, req, s, true); +- if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { ++ ++ /* ++ * We async punt it if the file wasn't marked NOWAIT, or if the file ++ * doesn't support non-blocking read/write attempts ++ */ ++ if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) || ++ (req->flags & REQ_F_MUST_PUNT))) { + struct io_uring_sqe *sqe_copy; + + sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL); +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c +index 071b90a45933..ad7a77101471 100644 +--- a/fs/nfs/delegation.c ++++ b/fs/nfs/delegation.c +@@ -1181,7 +1181,7 @@ bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode) + if (delegation != NULL && + nfs4_stateid_match_other(dst, &delegation->stateid)) { + dst->seqid = delegation->stateid.seqid; +- return ret; ++ ret = true; + } + rcu_read_unlock(); + out: +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 1406858bae6c..e1e7d2724b97 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -6058,6 +6058,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, + } + status = task->tk_status; + if (setclientid.sc_cred) { ++ kfree(clp->cl_acceptor); + clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred); + put_rpccred(setclientid.sc_cred); + } +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index 85ca49549b39..52cab65f91cf 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -786,7 +786,6 @@ static void nfs_inode_remove_request(struct nfs_page *req) + struct nfs_inode *nfsi = NFS_I(inode); + struct nfs_page *head; + +- atomic_long_dec(&nfsi->nrequests); + if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) { + head = req->wb_head; + +@@ -799,8 +798,10 @@ static void nfs_inode_remove_request(struct nfs_page *req) + spin_unlock(&mapping->private_lock); + } + +- if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) ++ if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) { + nfs_release_request(req); ++ atomic_long_dec(&nfsi->nrequests); ++ } + } + + static void +diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c +index a4c905d6b575..9b827143a350 100644 +--- a/fs/ocfs2/aops.c ++++ b/fs/ocfs2/aops.c +@@ -2042,7 +2042,8 @@ out_write_size: + inode->i_mtime = inode->i_ctime = current_time(inode); + di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec); + di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec); +- ocfs2_update_inode_fsync_trans(handle, inode, 1); ++ if (handle) ++ ocfs2_update_inode_fsync_trans(handle, inode, 1); + } + if (handle) + ocfs2_journal_dirty(handle, wc->w_di_bh); +@@ -2139,13 +2140,30 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock, + struct ocfs2_dio_write_ctxt *dwc = NULL; + struct buffer_head *di_bh = NULL; + u64 p_blkno; +- loff_t pos = iblock << inode->i_sb->s_blocksize_bits; ++ unsigned int i_blkbits = inode->i_sb->s_blocksize_bits; ++ loff_t pos = iblock << i_blkbits; ++ sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits; + unsigned len, total_len = bh_result->b_size; + int ret = 0, first_get_block = 0; + + len = osb->s_clustersize - (pos & (osb->s_clustersize - 1)); + len = min(total_len, len); + ++ /* ++ * bh_result->b_size is count in get_more_blocks according to write ++ * "pos" and 
"end", we need map twice to return different buffer state: ++ * 1. area in file size, not set NEW; ++ * 2. area out file size, set NEW. ++ * ++ * iblock endblk ++ * |--------|---------|---------|--------- ++ * |<-------area in file------->| ++ */ ++ ++ if ((iblock <= endblk) && ++ ((iblock + ((len - 1) >> i_blkbits)) > endblk)) ++ len = (endblk - iblock + 1) << i_blkbits; ++ + mlog(0, "get block of %lu at %llu:%u req %u\n", + inode->i_ino, pos, len, total_len); + +@@ -2229,6 +2247,9 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock, + if (desc->c_needs_zero) + set_buffer_new(bh_result); + ++ if (iblock > endblk) ++ set_buffer_new(bh_result); ++ + /* May sleep in end_io. It should not happen in a irq context. So defer + * it to dio work queue. */ + set_buffer_defer_completion(bh_result); +diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c +index d6f7b299eb23..efeea208fdeb 100644 +--- a/fs/ocfs2/ioctl.c ++++ b/fs/ocfs2/ioctl.c +@@ -283,7 +283,7 @@ static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb, + if (inode_alloc) + inode_lock(inode_alloc); + +- if (o2info_coherent(&fi->ifi_req)) { ++ if (inode_alloc && o2info_coherent(&fi->ifi_req)) { + status = ocfs2_inode_lock(inode_alloc, &bh, 0); + if (status < 0) { + mlog_errno(status); +diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c +index 90c830e3758e..d8507972ee13 100644 +--- a/fs/ocfs2/xattr.c ++++ b/fs/ocfs2/xattr.c +@@ -1490,18 +1490,6 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc, + return loc->xl_ops->xlo_check_space(loc, xi); + } + +-static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash) +-{ +- loc->xl_ops->xlo_add_entry(loc, name_hash); +- loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash); +- /* +- * We can't leave the new entry's xe_name_offset at zero or +- * add_namevalue() will go nuts. We set it to the size of our +- * storage so that it can never be less than any other entry. +- */ +- loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size); +-} +- + static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc, + struct ocfs2_xattr_info *xi) + { +@@ -2133,29 +2121,31 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc, + if (rc) + goto out; + +- if (loc->xl_entry) { +- if (ocfs2_xa_can_reuse_entry(loc, xi)) { +- orig_value_size = loc->xl_entry->xe_value_size; +- rc = ocfs2_xa_reuse_entry(loc, xi, ctxt); +- if (rc) +- goto out; +- goto alloc_value; +- } ++ if (!loc->xl_entry) { ++ rc = -EINVAL; ++ goto out; ++ } + +- if (!ocfs2_xattr_is_local(loc->xl_entry)) { +- orig_clusters = ocfs2_xa_value_clusters(loc); +- rc = ocfs2_xa_value_truncate(loc, 0, ctxt); +- if (rc) { +- mlog_errno(rc); +- ocfs2_xa_cleanup_value_truncate(loc, +- "overwriting", +- orig_clusters); +- goto out; +- } ++ if (ocfs2_xa_can_reuse_entry(loc, xi)) { ++ orig_value_size = loc->xl_entry->xe_value_size; ++ rc = ocfs2_xa_reuse_entry(loc, xi, ctxt); ++ if (rc) ++ goto out; ++ goto alloc_value; ++ } ++ ++ if (!ocfs2_xattr_is_local(loc->xl_entry)) { ++ orig_clusters = ocfs2_xa_value_clusters(loc); ++ rc = ocfs2_xa_value_truncate(loc, 0, ctxt); ++ if (rc) { ++ mlog_errno(rc); ++ ocfs2_xa_cleanup_value_truncate(loc, ++ "overwriting", ++ orig_clusters); ++ goto out; + } +- ocfs2_xa_wipe_namevalue(loc); +- } else +- ocfs2_xa_add_entry(loc, name_hash); ++ } ++ ocfs2_xa_wipe_namevalue(loc); + + /* + * If we get here, we have a blank entry. Fill it. 
We grow our +diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h +index 6eaa53cef0bd..30e676b36b24 100644 +--- a/include/linux/platform_data/dma-imx-sdma.h ++++ b/include/linux/platform_data/dma-imx-sdma.h +@@ -51,7 +51,10 @@ struct sdma_script_start_addrs { + /* End of v2 array */ + s32 zcanfd_2_mcu_addr; + s32 zqspi_2_mcu_addr; ++ s32 mcu_2_ecspi_addr; + /* End of v3 array */ ++ s32 mcu_2_zqspi_addr; ++ /* End of v4 array */ + }; + + /** +diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h +index 7638dbe7bc50..a940de03808d 100644 +--- a/include/linux/sunrpc/xprtsock.h ++++ b/include/linux/sunrpc/xprtsock.h +@@ -61,6 +61,7 @@ struct sock_xprt { + struct mutex recv_mutex; + struct sockaddr_storage srcaddr; + unsigned short srcport; ++ int xprt_err; + + /* + * UDP socket buffer size parameters +diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h +index df528a623548..ea985aa7a6c5 100644 +--- a/include/net/llc_conn.h ++++ b/include/net/llc_conn.h +@@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk); + + /* Access to a connection */ + int llc_conn_state_process(struct sock *sk, struct sk_buff *skb); +-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); ++void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb); + void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb); + void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit); + void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit); +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h +index 6b6b01234dd9..58b1fbc884a7 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -520,6 +520,11 @@ static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc) + return q; + } + ++static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc) ++{ ++ return rcu_dereference_bh(qdisc->dev_queue->qdisc); ++} ++ + static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc) + { + return qdisc->dev_queue->qdisc_sleeping; +diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h +index edc5c887a44c..45556fe771c3 100644 +--- a/include/trace/events/rxrpc.h ++++ b/include/trace/events/rxrpc.h +@@ -519,10 +519,10 @@ TRACE_EVENT(rxrpc_local, + ); + + TRACE_EVENT(rxrpc_peer, +- TP_PROTO(struct rxrpc_peer *peer, enum rxrpc_peer_trace op, ++ TP_PROTO(unsigned int peer_debug_id, enum rxrpc_peer_trace op, + int usage, const void *where), + +- TP_ARGS(peer, op, usage, where), ++ TP_ARGS(peer_debug_id, op, usage, where), + + TP_STRUCT__entry( + __field(unsigned int, peer ) +@@ -532,7 +532,7 @@ TRACE_EVENT(rxrpc_peer, + ), + + TP_fast_assign( +- __entry->peer = peer->debug_id; ++ __entry->peer = peer_debug_id; + __entry->op = op; + __entry->usage = usage; + __entry->where = where; +diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h +index e168dc59e9a0..1c215ea1798e 100644 +--- a/include/uapi/linux/nvme_ioctl.h ++++ b/include/uapi/linux/nvme_ioctl.h +@@ -45,27 +45,6 @@ struct nvme_passthru_cmd { + __u32 result; + }; + +-struct nvme_passthru_cmd64 { +- __u8 opcode; +- __u8 flags; +- __u16 rsvd1; +- __u32 nsid; +- __u32 cdw2; +- __u32 cdw3; +- __u64 metadata; +- __u64 addr; +- __u32 metadata_len; +- __u32 data_len; +- __u32 cdw10; +- __u32 cdw11; +- __u32 cdw12; +- __u32 cdw13; +- __u32 cdw14; +- __u32 cdw15; +- __u32 timeout_ms; +- __u64 result; +-}; +- + #define nvme_admin_cmd nvme_passthru_cmd + + #define NVME_IOCTL_ID _IO('N', 
0x40) +@@ -75,7 +54,5 @@ struct nvme_passthru_cmd64 { + #define NVME_IOCTL_RESET _IO('N', 0x44) + #define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45) + #define NVME_IOCTL_RESCAN _IO('N', 0x46) +-#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64) +-#define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64) + + #endif /* _UAPI_LINUX_NVME_IOCTL_H */ +diff --git a/kernel/events/core.c b/kernel/events/core.c +index a2a50b668ef3..53173883513c 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -3694,11 +3694,23 @@ static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) + perf_event_groups_insert(&ctx->flexible_groups, event); + } + ++/* pick an event from the flexible_groups to rotate */ + static inline struct perf_event * +-ctx_first_active(struct perf_event_context *ctx) ++ctx_event_to_rotate(struct perf_event_context *ctx) + { +- return list_first_entry_or_null(&ctx->flexible_active, +- struct perf_event, active_list); ++ struct perf_event *event; ++ ++ /* pick the first active flexible event */ ++ event = list_first_entry_or_null(&ctx->flexible_active, ++ struct perf_event, active_list); ++ ++ /* if no active flexible event, pick the first event */ ++ if (!event) { ++ event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree), ++ typeof(*event), group_node); ++ } ++ ++ return event; + } + + static bool perf_rotate_context(struct perf_cpu_context *cpuctx) +@@ -3723,9 +3735,9 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx) + perf_pmu_disable(cpuctx->ctx.pmu); + + if (task_rotate) +- task_event = ctx_first_active(task_ctx); ++ task_event = ctx_event_to_rotate(task_ctx); + if (cpu_rotate) +- cpu_event = ctx_first_active(&cpuctx->ctx); ++ cpu_event = ctx_event_to_rotate(&cpuctx->ctx); + + /* + * As per the order given at ctx_resched() first 'pop' task flexible +@@ -5512,8 +5524,10 @@ static void perf_mmap_close(struct vm_area_struct *vma) + perf_pmu_output_stop(event); + + /* now it's safe to free the pages */ +- atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); +- atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm); ++ if (!rb->aux_mmap_locked) ++ atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm); ++ else ++ atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm); + + /* this has to be the last one */ + rb_free_aux(rb); +@@ -5585,7 +5599,8 @@ again: + * undo the VM accounting. 
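
The corrected undo that follows subtracts only the pages that were actually charged to the user's locked_vm; pages charged to the mm's pinned_vm (tracked in mmap_locked / aux_mmap_locked) are now subtracted from pinned_vm instead of being double-counted. A toy model of that bookkeeping in plain C — the counters and page counts are hypothetical, not the kernel API:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    long locked_vm = 100, pinned_vm = 50;   /* pre-existing charges */
    long nr_pages = 16 + 1;                 /* data pages + header page */
    long mmap_locked = 5;                   /* portion charged to pinned_vm */

    /* charge at mmap time: split across the two counters */
    locked_vm += nr_pages - mmap_locked;
    pinned_vm += mmap_locked;

    /* undo at close time; the buggy path subtracted all nr_pages
     * from locked_vm, leaving pinned_vm overcharged */
    locked_vm -= nr_pages - mmap_locked;
    pinned_vm -= mmap_locked;

    assert(locked_vm == 100 && pinned_vm == 50);
    printf("balanced: locked_vm=%ld pinned_vm=%ld\n", locked_vm, pinned_vm);
    return 0;
}
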
+ */ + +- atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm); ++ atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked, ++ &mmap_user->locked_vm); + atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm); + free_uid(mmap_user); + +@@ -5729,8 +5744,20 @@ accounting: + + user_locked = atomic_long_read(&user->locked_vm) + user_extra; + +- if (user_locked > user_lock_limit) ++ if (user_locked <= user_lock_limit) { ++ /* charge all to locked_vm */ ++ } else if (atomic_long_read(&user->locked_vm) >= user_lock_limit) { ++ /* charge all to pinned_vm */ ++ extra = user_extra; ++ user_extra = 0; ++ } else { ++ /* ++ * charge locked_vm until it hits user_lock_limit; ++ * charge the rest from pinned_vm ++ */ + extra = user_locked - user_lock_limit; ++ user_extra -= extra; ++ } + + lock_limit = rlimit(RLIMIT_MEMLOCK); + lock_limit >>= PAGE_SHIFT; +diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c +index 2305ce89a26c..46ed4e1383e2 100644 +--- a/kernel/sched/cputime.c ++++ b/kernel/sched/cputime.c +@@ -740,7 +740,7 @@ void vtime_account_system(struct task_struct *tsk) + + write_seqcount_begin(&vtime->seqcount); + /* We might have scheduled out from guest path */ +- if (current->flags & PF_VCPU) ++ if (tsk->flags & PF_VCPU) + vtime_account_guest(tsk, vtime); + else + __vtime_account_system(tsk, vtime); +@@ -783,7 +783,7 @@ void vtime_guest_enter(struct task_struct *tsk) + */ + write_seqcount_begin(&vtime->seqcount); + __vtime_account_system(tsk, vtime); +- current->flags |= PF_VCPU; ++ tsk->flags |= PF_VCPU; + write_seqcount_end(&vtime->seqcount); + } + EXPORT_SYMBOL_GPL(vtime_guest_enter); +@@ -794,7 +794,7 @@ void vtime_guest_exit(struct task_struct *tsk) + + write_seqcount_begin(&vtime->seqcount); + vtime_account_guest(tsk, vtime); +- current->flags &= ~PF_VCPU; ++ tsk->flags &= ~PF_VCPU; + write_seqcount_end(&vtime->seqcount); + } + EXPORT_SYMBOL_GPL(vtime_guest_exit); +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 86cfc5d5129c..649c6b60929e 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -4355,23 +4355,16 @@ static inline u64 sched_cfs_bandwidth_slice(void) + } + + /* +- * Replenish runtime according to assigned quota and update expiration time. +- * We use sched_clock_cpu directly instead of rq->clock to avoid adding +- * additional synchronization around rq->lock. ++ * Replenish runtime according to assigned quota. We use sched_clock_cpu ++ * directly instead of rq->clock to avoid adding additional synchronization ++ * around rq->lock. 
+ * + * requires cfs_b->lock + */ + void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) + { +- u64 now; +- +- if (cfs_b->quota == RUNTIME_INF) +- return; +- +- now = sched_clock_cpu(smp_processor_id()); +- cfs_b->runtime = cfs_b->quota; +- cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); +- cfs_b->expires_seq++; ++ if (cfs_b->quota != RUNTIME_INF) ++ cfs_b->runtime = cfs_b->quota; + } + + static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) +@@ -4393,8 +4386,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) + { + struct task_group *tg = cfs_rq->tg; + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); +- u64 amount = 0, min_amount, expires; +- int expires_seq; ++ u64 amount = 0, min_amount; + + /* note: this is a positive sum as runtime_remaining <= 0 */ + min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; +@@ -4411,61 +4403,17 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) + cfs_b->idle = 0; + } + } +- expires_seq = cfs_b->expires_seq; +- expires = cfs_b->runtime_expires; + raw_spin_unlock(&cfs_b->lock); + + cfs_rq->runtime_remaining += amount; +- /* +- * we may have advanced our local expiration to account for allowed +- * spread between our sched_clock and the one on which runtime was +- * issued. +- */ +- if (cfs_rq->expires_seq != expires_seq) { +- cfs_rq->expires_seq = expires_seq; +- cfs_rq->runtime_expires = expires; +- } + + return cfs_rq->runtime_remaining > 0; + } + +-/* +- * Note: This depends on the synchronization provided by sched_clock and the +- * fact that rq->clock snapshots this value. +- */ +-static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) +-{ +- struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); +- +- /* if the deadline is ahead of our clock, nothing to do */ +- if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0)) +- return; +- +- if (cfs_rq->runtime_remaining < 0) +- return; +- +- /* +- * If the local deadline has passed we have to consider the +- * possibility that our sched_clock is 'fast' and the global deadline +- * has not truly expired. +- * +- * Fortunately we can check determine whether this the case by checking +- * whether the global deadline(cfs_b->expires_seq) has advanced. 
+- */ +- if (cfs_rq->expires_seq == cfs_b->expires_seq) { +- /* extend local deadline, drift is bounded above by 2 ticks */ +- cfs_rq->runtime_expires += TICK_NSEC; +- } else { +- /* global deadline is ahead, expiration has passed */ +- cfs_rq->runtime_remaining = 0; +- } +-} +- + static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) + { + /* dock delta_exec before expiring quota (as it could span periods) */ + cfs_rq->runtime_remaining -= delta_exec; +- expire_cfs_rq_runtime(cfs_rq); + + if (likely(cfs_rq->runtime_remaining > 0)) + return; +@@ -4658,8 +4606,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) + resched_curr(rq); + } + +-static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, +- u64 remaining, u64 expires) ++static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining) + { + struct cfs_rq *cfs_rq; + u64 runtime; +@@ -4684,7 +4631,6 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, + remaining -= runtime; + + cfs_rq->runtime_remaining += runtime; +- cfs_rq->runtime_expires = expires; + + /* we check whether we're throttled above */ + if (cfs_rq->runtime_remaining > 0) +@@ -4709,7 +4655,7 @@ next: + */ + static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags) + { +- u64 runtime, runtime_expires; ++ u64 runtime; + int throttled; + + /* no need to continue the timer with no bandwidth constraint */ +@@ -4737,8 +4683,6 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u + /* account preceding periods in which throttling occurred */ + cfs_b->nr_throttled += overrun; + +- runtime_expires = cfs_b->runtime_expires; +- + /* + * This check is repeated as we are holding onto the new bandwidth while + * we unthrottle. 
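
The reworked distribute_cfs_runtime() above drops the expires argument entirely: it just tops each throttled runqueue up to one unit above zero until the pool runs dry. A sketch of that loop over hypothetical per-runqueue deficits:

#include <stdio.h>

int main(void)
{
    long long deficits[] = { -3000, -12000, -500 }; /* runtime_remaining <= 0 */
    int n = sizeof(deficits) / sizeof(deficits[0]);
    unsigned long long remaining = 10000;           /* pool left this period */

    for (int i = 0; i < n && remaining; i++) {
        /* one unit more than the deficit makes the rq runnable again */
        unsigned long long runtime = -deficits[i] + 1;

        if (runtime > remaining)
            runtime = remaining;
        remaining -= runtime;
        deficits[i] += runtime;
        printf("rq%d remaining=%lld pool=%llu\n",
               i, deficits[i], remaining);
    }
    return 0;
}
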
This can potentially race with an unthrottled group +@@ -4751,8 +4695,7 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u + cfs_b->distribute_running = 1; + raw_spin_unlock_irqrestore(&cfs_b->lock, flags); + /* we can't nest cfs_b->lock while distributing bandwidth */ +- runtime = distribute_cfs_runtime(cfs_b, runtime, +- runtime_expires); ++ runtime = distribute_cfs_runtime(cfs_b, runtime); + raw_spin_lock_irqsave(&cfs_b->lock, flags); + + cfs_b->distribute_running = 0; +@@ -4834,8 +4777,7 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) + return; + + raw_spin_lock(&cfs_b->lock); +- if (cfs_b->quota != RUNTIME_INF && +- cfs_rq->runtime_expires == cfs_b->runtime_expires) { ++ if (cfs_b->quota != RUNTIME_INF) { + cfs_b->runtime += slack_runtime; + + /* we are under rq->lock, defer unthrottling using a timer */ +@@ -4868,7 +4810,6 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) + { + u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); + unsigned long flags; +- u64 expires; + + /* confirm we're still not at a refresh boundary */ + raw_spin_lock_irqsave(&cfs_b->lock, flags); +@@ -4886,7 +4827,6 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) + if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) + runtime = cfs_b->runtime; + +- expires = cfs_b->runtime_expires; + if (runtime) + cfs_b->distribute_running = 1; + +@@ -4895,11 +4835,10 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) + if (!runtime) + return; + +- runtime = distribute_cfs_runtime(cfs_b, runtime, expires); ++ runtime = distribute_cfs_runtime(cfs_b, runtime); + + raw_spin_lock_irqsave(&cfs_b->lock, flags); +- if (expires == cfs_b->runtime_expires) +- lsub_positive(&cfs_b->runtime, runtime); ++ lsub_positive(&cfs_b->runtime, runtime); + cfs_b->distribute_running = 0; + raw_spin_unlock_irqrestore(&cfs_b->lock, flags); + } +@@ -4995,20 +4934,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) + if (++count > 3) { + u64 new, old = ktime_to_ns(cfs_b->period); + +- new = (old * 147) / 128; /* ~115% */ +- new = min(new, max_cfs_quota_period); +- +- cfs_b->period = ns_to_ktime(new); +- +- /* since max is 1s, this is limited to 1e9^2, which fits in u64 */ +- cfs_b->quota *= new; +- cfs_b->quota = div64_u64(cfs_b->quota, old); +- +- pr_warn_ratelimited( +- "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n", +- smp_processor_id(), +- div_u64(new, NSEC_PER_USEC), +- div_u64(cfs_b->quota, NSEC_PER_USEC)); ++ /* ++ * Grow period by a factor of 2 to avoid losing precision. ++ * Precision loss in the quota/period ratio can cause __cfs_schedulable ++ * to fail. 
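
The replacement code that follows scales both period and quota by exactly 2; unlike the old *147/128 step, doubling preserves the quota/period ratio bit-for-bit under integer arithmetic. A quick standalone demonstration with hypothetical nanosecond values:

#include <stdio.h>

int main(void)
{
    unsigned long long period = 10000, quota = 3333;   /* ns, hypothetical */

    /* old scaling: ~115%, integer division truncates the ratio */
    unsigned long long p_old = period * 147 / 128;     /* 11484 */
    unsigned long long q_old = quota * p_old / period; /* 3827: ratio drifts */

    /* new scaling: exact */
    unsigned long long p_new = period * 2;
    unsigned long long q_new = quota * 2;

    printf("old ratio %%: %.6f -> %.6f\n",
           100.0 * quota / period, 100.0 * q_old / p_old);
    printf("new ratio %%: %.6f -> %.6f\n",
           100.0 * quota / period, 100.0 * q_new / p_new);
    return 0;
}
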
++ */ ++ new = old * 2; ++ if (new < max_cfs_quota_period) { ++ cfs_b->period = ns_to_ktime(new); ++ cfs_b->quota *= 2; ++ ++ pr_warn_ratelimited( ++ "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n", ++ smp_processor_id(), ++ div_u64(new, NSEC_PER_USEC), ++ div_u64(cfs_b->quota, NSEC_PER_USEC)); ++ } else { ++ pr_warn_ratelimited( ++ "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n", ++ smp_processor_id(), ++ div_u64(old, NSEC_PER_USEC), ++ div_u64(cfs_b->quota, NSEC_PER_USEC)); ++ } + + /* reset count so we don't come right back in here */ + count = 0; +@@ -5047,17 +4994,13 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) + + void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) + { +- u64 overrun; +- + lockdep_assert_held(&cfs_b->lock); + + if (cfs_b->period_active) + return; + + cfs_b->period_active = 1; +- overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); +- cfs_b->runtime_expires += (overrun + 1) * ktime_to_ns(cfs_b->period); +- cfs_b->expires_seq++; ++ hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); + hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); + } + +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index 802b1f3405f2..28c16e94bc1d 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -335,8 +335,6 @@ struct cfs_bandwidth { + u64 quota; + u64 runtime; + s64 hierarchical_quota; +- u64 runtime_expires; +- int expires_seq; + + u8 idle; + u8 period_active; +@@ -556,8 +554,6 @@ struct cfs_rq { + + #ifdef CONFIG_CFS_BANDWIDTH + int runtime_enabled; +- int expires_seq; +- u64 runtime_expires; + s64 runtime_remaining; + + u64 throttled_clock; +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 04458ed44a55..a5e27f1c35a1 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -6012,6 +6012,7 @@ waitagain: + sizeof(struct trace_iterator) - + offsetof(struct trace_iterator, seq)); + cpumask_clear(iter->started); ++ trace_seq_init(&iter->seq); + iter->pos = -1; + + trace_event_read_lock(); +diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c +index d78938e3e008..5b0b20e6da95 100644 +--- a/net/batman-adv/bat_iv_ogm.c ++++ b/net/batman-adv/bat_iv_ogm.c +@@ -22,6 +22,8 @@ + #include + #include + #include ++#include ++#include + #include + #include + #include +@@ -193,14 +195,18 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface) + unsigned char *ogm_buff; + u32 random_seqno; + ++ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); ++ + /* randomize initial seqno to avoid collision */ + get_random_bytes(&random_seqno, sizeof(random_seqno)); + atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno); + + hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN; + ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC); +- if (!ogm_buff) ++ if (!ogm_buff) { ++ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); + return -ENOMEM; ++ } + + hard_iface->bat_iv.ogm_buff = ogm_buff; + +@@ -212,35 +218,59 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface) + batadv_ogm_packet->reserved = 0; + batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE; + ++ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); ++ + return 0; + } + + static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface) + { ++ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); ++ + kfree(hard_iface->bat_iv.ogm_buff); + 
hard_iface->bat_iv.ogm_buff = NULL; ++ ++ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); + } + + static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface) + { + struct batadv_ogm_packet *batadv_ogm_packet; +- unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff; ++ void *ogm_buff; + +- batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; ++ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); ++ ++ ogm_buff = hard_iface->bat_iv.ogm_buff; ++ if (!ogm_buff) ++ goto unlock; ++ ++ batadv_ogm_packet = ogm_buff; + ether_addr_copy(batadv_ogm_packet->orig, + hard_iface->net_dev->dev_addr); + ether_addr_copy(batadv_ogm_packet->prev_sender, + hard_iface->net_dev->dev_addr); ++ ++unlock: ++ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); + } + + static void + batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface) + { + struct batadv_ogm_packet *batadv_ogm_packet; +- unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff; ++ void *ogm_buff; + +- batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff; ++ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); ++ ++ ogm_buff = hard_iface->bat_iv.ogm_buff; ++ if (!ogm_buff) ++ goto unlock; ++ ++ batadv_ogm_packet = ogm_buff; + batadv_ogm_packet->ttl = BATADV_TTL; ++ ++unlock: ++ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); + } + + /* when do we schedule our own ogm to be sent */ +@@ -742,7 +772,11 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface) + } + } + +-static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface) ++/** ++ * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer ++ * @hard_iface: interface whose ogm buffer should be transmitted ++ */ ++static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface) + { + struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); + unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff; +@@ -753,9 +787,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface) + u16 tvlv_len = 0; + unsigned long send_time; + +- if (hard_iface->if_status == BATADV_IF_NOT_IN_USE || +- hard_iface->if_status == BATADV_IF_TO_BE_REMOVED) +- return; ++ lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex); + + /* the interface gets activated here to avoid race conditions between + * the moment of activating the interface in +@@ -823,6 +855,17 @@ out: + batadv_hardif_put(primary_if); + } + ++static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface) ++{ ++ if (hard_iface->if_status == BATADV_IF_NOT_IN_USE || ++ hard_iface->if_status == BATADV_IF_TO_BE_REMOVED) ++ return; ++ ++ mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex); ++ batadv_iv_ogm_schedule_buff(hard_iface); ++ mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex); ++} ++ + /** + * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over iterface + * @orig_node: originator which reproadcasted the OGMs directly +diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c +index c90e47342bb0..afb52282d5bd 100644 +--- a/net/batman-adv/hard-interface.c ++++ b/net/batman-adv/hard-interface.c +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -929,6 +930,7 @@ batadv_hardif_add_interface(struct net_device *net_dev) + INIT_LIST_HEAD(&hard_iface->list); + INIT_HLIST_HEAD(&hard_iface->neigh_list); + ++ mutex_init(&hard_iface->bat_iv.ogm_buff_mutex); + spin_lock_init(&hard_iface->neigh_list_lock); + kref_init(&hard_iface->refcount); + +diff --git 
a/net/batman-adv/types.h b/net/batman-adv/types.h +index 6ae139d74e0f..10597a5f3303 100644 +--- a/net/batman-adv/types.h ++++ b/net/batman-adv/types.h +@@ -81,6 +81,9 @@ struct batadv_hard_iface_bat_iv { + + /** @ogm_seqno: OGM sequence number - used to identify each OGM */ + atomic_t ogm_seqno; ++ ++ /** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */ ++ struct mutex ogm_buff_mutex; + }; + + /** +diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c +index 4d78375f9872..647c0554d04c 100644 +--- a/net/llc/llc_c_ac.c ++++ b/net/llc/llc_c_ac.c +@@ -372,6 +372,7 @@ int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) + llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR); + rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); + if (likely(!rc)) { ++ skb_get(skb); + llc_conn_send_pdu(sk, skb); + llc_conn_ac_inc_vs_by_1(sk, skb); + } +@@ -389,7 +390,8 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb) + llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); + rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); + if (likely(!rc)) { +- rc = llc_conn_send_pdu(sk, skb); ++ skb_get(skb); ++ llc_conn_send_pdu(sk, skb); + llc_conn_ac_inc_vs_by_1(sk, skb); + } + return rc; +@@ -406,6 +408,7 @@ int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) + llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); + rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); + if (likely(!rc)) { ++ skb_get(skb); + llc_conn_send_pdu(sk, skb); + llc_conn_ac_inc_vs_by_1(sk, skb); + } +@@ -916,7 +919,8 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, + llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR); + rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); + if (likely(!rc)) { +- rc = llc_conn_send_pdu(sk, skb); ++ skb_get(skb); ++ llc_conn_send_pdu(sk, skb); + llc_conn_ac_inc_vs_by_1(sk, skb); + } + return rc; +diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c +index 4ff89cb7c86f..ed2aca12460c 100644 +--- a/net/llc/llc_conn.c ++++ b/net/llc/llc_conn.c +@@ -30,7 +30,7 @@ + #endif + + static int llc_find_offset(int state, int ev_type); +-static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb); ++static void llc_conn_send_pdus(struct sock *sk); + static int llc_conn_service(struct sock *sk, struct sk_buff *skb); + static int llc_exec_conn_trans_actions(struct sock *sk, + struct llc_conn_state_trans *trans, +@@ -193,11 +193,11 @@ out_skb_put: + return rc; + } + +-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb) ++void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb) + { + /* queue PDU to send to MAC layer */ + skb_queue_tail(&sk->sk_write_queue, skb); +- return llc_conn_send_pdus(sk, skb); ++ llc_conn_send_pdus(sk); + } + + /** +@@ -255,7 +255,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit) + if (howmany_resend > 0) + llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; + /* any PDUs to re-send are queued up; start sending to MAC */ +- llc_conn_send_pdus(sk, NULL); ++ llc_conn_send_pdus(sk); + out:; + } + +@@ -296,7 +296,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit) + if (howmany_resend > 0) + llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO; + /* any PDUs to re-send are queued up; start sending to MAC */ +- llc_conn_send_pdus(sk, NULL); ++ llc_conn_send_pdus(sk); + out:; + } + +@@ -340,16 +340,12 @@ out: + /** + * llc_conn_send_pdus - Sends queued PDUs + * @sk: active connection +- * @hold_skb: the skb 
held by caller, or NULL if does not care + * +- * Sends queued pdus to MAC layer for transmission. When @hold_skb is +- * NULL, always return 0. Otherwise, return 0 if @hold_skb is sent +- * successfully, or 1 for failure. ++ * Sends queued pdus to MAC layer for transmission. + */ +-static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb) ++static void llc_conn_send_pdus(struct sock *sk) + { + struct sk_buff *skb; +- int ret = 0; + + while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) { + struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); +@@ -361,20 +357,10 @@ static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb) + skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb); + if (!skb2) + break; +- dev_queue_xmit(skb2); +- } else { +- bool is_target = skb == hold_skb; +- int rc; +- +- if (is_target) +- skb_get(skb); +- rc = dev_queue_xmit(skb); +- if (is_target) +- ret = rc; ++ skb = skb2; + } ++ dev_queue_xmit(skb); + } +- +- return ret; + } + + /** +diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c +index a94bd56bcac6..7ae4cc684d3a 100644 +--- a/net/llc/llc_s_ac.c ++++ b/net/llc/llc_s_ac.c +@@ -58,8 +58,10 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb) + ev->daddr.lsap, LLC_PDU_CMD); + llc_pdu_init_as_ui_cmd(skb); + rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); +- if (likely(!rc)) ++ if (likely(!rc)) { ++ skb_get(skb); + rc = dev_queue_xmit(skb); ++ } + return rc; + } + +@@ -81,8 +83,10 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb) + ev->daddr.lsap, LLC_PDU_CMD); + llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0); + rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); +- if (likely(!rc)) ++ if (likely(!rc)) { ++ skb_get(skb); + rc = dev_queue_xmit(skb); ++ } + return rc; + } + +@@ -135,8 +139,10 @@ int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb) + ev->daddr.lsap, LLC_PDU_CMD); + llc_pdu_init_as_test_cmd(skb); + rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); +- if (likely(!rc)) ++ if (likely(!rc)) { ++ skb_get(skb); + rc = dev_queue_xmit(skb); ++ } + return rc; + } + +diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c +index a7f7b8ff4729..be419062e19a 100644 +--- a/net/llc/llc_sap.c ++++ b/net/llc/llc_sap.c +@@ -197,29 +197,22 @@ out: + * After executing actions of the event, upper layer will be indicated + * if needed(on receiving an UI frame). sk can be null for the + * datalink_proto case. ++ * ++ * This function always consumes a reference to the skb. + */ + static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb) + { + struct llc_sap_state_ev *ev = llc_sap_ev(skb); + +- /* +- * We have to hold the skb, because llc_sap_next_state +- * will kfree it in the sending path and we need to +- * look at the skb->cb, where we encode llc_sap_state_ev. +- */ +- skb_get(skb); + ev->ind_cfm_flag = 0; + llc_sap_next_state(sap, skb); +- if (ev->ind_cfm_flag == LLC_IND) { +- if (skb->sk->sk_state == TCP_LISTEN) +- kfree_skb(skb); +- else { +- llc_save_primitive(skb->sk, skb, ev->prim); + +- /* queue skb to the user. */ +- if (sock_queue_rcv_skb(skb->sk, skb)) +- kfree_skb(skb); +- } ++ if (ev->ind_cfm_flag == LLC_IND && skb->sk->sk_state != TCP_LISTEN) { ++ llc_save_primitive(skb->sk, skb, ev->prim); ++ ++ /* queue skb to the user. 
*/ ++ if (sock_queue_rcv_skb(skb->sk, skb) == 0) ++ return; + } + kfree_skb(skb); + } +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c +index 81a8ef42b88d..56b1cf82ed3a 100644 +--- a/net/netfilter/nf_conntrack_core.c ++++ b/net/netfilter/nf_conntrack_core.c +@@ -1793,8 +1793,8 @@ void __nf_ct_refresh_acct(struct nf_conn *ct, + if (nf_ct_is_confirmed(ct)) + extra_jiffies += nfct_time_stamp; + +- if (ct->timeout != extra_jiffies) +- ct->timeout = extra_jiffies; ++ if (READ_ONCE(ct->timeout) != extra_jiffies) ++ WRITE_ONCE(ct->timeout, extra_jiffies); + acct: + if (do_acct) + nf_ct_acct_update(ct, ctinfo, skb->len); +diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c +index 9c3ac96f71cb..64830d8c1fdb 100644 +--- a/net/rxrpc/peer_object.c ++++ b/net/rxrpc/peer_object.c +@@ -216,7 +216,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp) + peer = kzalloc(sizeof(struct rxrpc_peer), gfp); + if (peer) { + atomic_set(&peer->usage, 1); +- peer->local = local; ++ peer->local = rxrpc_get_local(local); + INIT_HLIST_HEAD(&peer->error_targets); + peer->service_conns = RB_ROOT; + seqlock_init(&peer->service_conn_lock); +@@ -307,7 +307,6 @@ void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local, + unsigned long hash_key; + + hash_key = rxrpc_peer_hash_key(local, &peer->srx); +- peer->local = local; + rxrpc_init_peer(rx, peer, hash_key); + + spin_lock(&rxnet->peer_hash_lock); +@@ -382,7 +381,7 @@ struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer) + int n; + + n = atomic_inc_return(&peer->usage); +- trace_rxrpc_peer(peer, rxrpc_peer_got, n, here); ++ trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here); + return peer; + } + +@@ -396,7 +395,7 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer) + if (peer) { + int n = atomic_fetch_add_unless(&peer->usage, 1, 0); + if (n > 0) +- trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here); ++ trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here); + else + peer = NULL; + } +@@ -417,6 +416,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer) + list_del_init(&peer->keepalive_link); + spin_unlock_bh(&rxnet->peer_hash_lock); + ++ rxrpc_put_local(peer->local); + kfree_rcu(peer, rcu); + } + +@@ -426,11 +426,13 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer) + void rxrpc_put_peer(struct rxrpc_peer *peer) + { + const void *here = __builtin_return_address(0); ++ unsigned int debug_id; + int n; + + if (peer) { ++ debug_id = peer->debug_id; + n = atomic_dec_return(&peer->usage); +- trace_rxrpc_peer(peer, rxrpc_peer_put, n, here); ++ trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here); + if (n == 0) + __rxrpc_put_peer(peer); + } +@@ -443,13 +445,15 @@ void rxrpc_put_peer(struct rxrpc_peer *peer) + void rxrpc_put_peer_locked(struct rxrpc_peer *peer) + { + const void *here = __builtin_return_address(0); ++ unsigned int debug_id = peer->debug_id; + int n; + + n = atomic_dec_return(&peer->usage); +- trace_rxrpc_peer(peer, rxrpc_peer_put, n, here); ++ trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here); + if (n == 0) { + hash_del_rcu(&peer->hash_link); + list_del_init(&peer->keepalive_link); ++ rxrpc_put_local(peer->local); + kfree_rcu(peer, rcu); + } + } +diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c +index 6a1547b270fe..22f51a7e356e 100644 +--- a/net/rxrpc/sendmsg.c ++++ b/net/rxrpc/sendmsg.c +@@ -661,6 +661,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) + case RXRPC_CALL_SERVER_PREALLOC: + case 
RXRPC_CALL_SERVER_SECURING: + case RXRPC_CALL_SERVER_ACCEPTING: ++ rxrpc_put_call(call, rxrpc_call_put); + ret = -EBUSY; + goto error_release_sock; + default: +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c +index f5cb35e550f8..0e44039e729c 100644 +--- a/net/sched/sch_netem.c ++++ b/net/sched/sch_netem.c +@@ -476,7 +476,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, + * skb will be queued. + */ + if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { +- struct Qdisc *rootq = qdisc_root(sch); ++ struct Qdisc *rootq = qdisc_root_bh(sch); + u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ + + q->duplicate = 0; +diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c +index 1dff8506a715..d448fe3068e5 100644 +--- a/net/sched/sch_sfb.c ++++ b/net/sched/sch_sfb.c +@@ -488,7 +488,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) + { + struct sfb_sched_data *q = qdisc_priv(sch); +- struct Qdisc *child; ++ struct Qdisc *child, *old; + struct nlattr *tb[TCA_SFB_MAX + 1]; + const struct tc_sfb_qopt *ctl = &sfb_default_ops; + u32 limit; +@@ -518,8 +518,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt, + qdisc_hash_add(child, true); + sch_tree_lock(sch); + +- qdisc_tree_flush_backlog(q->qdisc); +- qdisc_put(q->qdisc); ++ qdisc_purge_queue(q->qdisc); ++ old = q->qdisc; + q->qdisc = child; + + q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval); +@@ -542,6 +542,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt, + sfb_init_perturbation(1, q); + + sch_tree_unlock(sch); ++ qdisc_put(old); + + return 0; + } +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c +index e2176c167a57..4e0b5bed6c73 100644 +--- a/net/sunrpc/xprtsock.c ++++ b/net/sunrpc/xprtsock.c +@@ -1243,19 +1243,21 @@ static void xs_error_report(struct sock *sk) + { + struct sock_xprt *transport; + struct rpc_xprt *xprt; +- int err; + + read_lock_bh(&sk->sk_callback_lock); + if (!(xprt = xprt_from_sock(sk))) + goto out; + + transport = container_of(xprt, struct sock_xprt, xprt); +- err = -sk->sk_err; +- if (err == 0) ++ transport->xprt_err = -sk->sk_err; ++ if (transport->xprt_err == 0) + goto out; + dprintk("RPC: xs_error_report client %p, error=%d...\n", +- xprt, -err); +- trace_rpc_socket_error(xprt, sk->sk_socket, err); ++ xprt, -transport->xprt_err); ++ trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err); ++ ++ /* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */ ++ smp_mb__before_atomic(); + xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR); + out: + read_unlock_bh(&sk->sk_callback_lock); +@@ -2470,7 +2472,6 @@ static void xs_wake_write(struct sock_xprt *transport) + static void xs_wake_error(struct sock_xprt *transport) + { + int sockerr; +- int sockerr_len = sizeof(sockerr); + + if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state)) + return; +@@ -2479,9 +2480,7 @@ static void xs_wake_error(struct sock_xprt *transport) + goto out; + if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state)) + goto out; +- if (kernel_getsockopt(transport->sock, SOL_SOCKET, SO_ERROR, +- (char *)&sockerr, &sockerr_len) != 0) +- goto out; ++ sockerr = xchg(&transport->xprt_err, 0); + if (sockerr < 0) + xprt_wake_pending_tasks(&transport->xprt, sockerr); + out: +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index c2ce582ea143..da752caa1cda 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -377,7 +377,7 @@ const struct 
nla_policy nl80211_policy[NUM_NL80211_ATTR] = { + [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ }, + [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, + .len = IEEE80211_MAX_MESH_ID_LEN }, +- [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, ++ [NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT, + + [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 }, + [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED }, +diff --git a/sound/core/timer.c b/sound/core/timer.c +index 5c9fbf3f4340..6b724d2ee2de 100644 +--- a/sound/core/timer.c ++++ b/sound/core/timer.c +@@ -226,7 +226,8 @@ static int snd_timer_check_master(struct snd_timer_instance *master) + return 0; + } + +-static int snd_timer_close_locked(struct snd_timer_instance *timeri); ++static int snd_timer_close_locked(struct snd_timer_instance *timeri, ++ struct device **card_devp_to_put); + + /* + * open a timer instance +@@ -238,6 +239,7 @@ int snd_timer_open(struct snd_timer_instance **ti, + { + struct snd_timer *timer; + struct snd_timer_instance *timeri = NULL; ++ struct device *card_dev_to_put = NULL; + int err; + + mutex_lock(®ister_mutex); +@@ -261,7 +263,7 @@ int snd_timer_open(struct snd_timer_instance **ti, + list_add_tail(&timeri->open_list, &snd_timer_slave_list); + err = snd_timer_check_slave(timeri); + if (err < 0) { +- snd_timer_close_locked(timeri); ++ snd_timer_close_locked(timeri, &card_dev_to_put); + timeri = NULL; + } + goto unlock; +@@ -313,7 +315,7 @@ int snd_timer_open(struct snd_timer_instance **ti, + timeri = NULL; + + if (timer->card) +- put_device(&timer->card->card_dev); ++ card_dev_to_put = &timer->card->card_dev; + module_put(timer->module); + goto unlock; + } +@@ -323,12 +325,15 @@ int snd_timer_open(struct snd_timer_instance **ti, + timer->num_instances++; + err = snd_timer_check_master(timeri); + if (err < 0) { +- snd_timer_close_locked(timeri); ++ snd_timer_close_locked(timeri, &card_dev_to_put); + timeri = NULL; + } + + unlock: + mutex_unlock(®ister_mutex); ++ /* put_device() is called after unlock for avoiding deadlock */ ++ if (card_dev_to_put) ++ put_device(card_dev_to_put); + *ti = timeri; + return err; + } +@@ -338,7 +343,8 @@ EXPORT_SYMBOL(snd_timer_open); + * close a timer instance + * call this with register_mutex down. 
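
The snd_timer changes around this hunk defer put_device() until register_mutex has been released: the locked helpers only record which device to drop, and the caller performs the actual put after unlocking, avoiding a deadlock if the release path re-takes the mutex. A generic sketch of that deferred-release pattern — the mutex, struct, and helpers here are stand-ins, not the ALSA API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t register_mutex = PTHREAD_MUTEX_INITIALIZER;

struct device { const char *name; };

static void put_device(struct device *dev)
{
    /* in the real driver this may re-enter code taking register_mutex */
    printf("released %s\n", dev->name);
}

static void close_locked(struct device *dev, struct device **devp_to_put)
{
    /* caller holds register_mutex; only record the pending put */
    *devp_to_put = dev;
}

int main(void)
{
    struct device card = { "card0" };
    struct device *to_put = NULL;

    pthread_mutex_lock(&register_mutex);
    close_locked(&card, &to_put);
    pthread_mutex_unlock(&register_mutex);

    if (to_put)                 /* safe: the lock is already dropped */
        put_device(to_put);
    return 0;
}
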
+ */ +-static int snd_timer_close_locked(struct snd_timer_instance *timeri) ++static int snd_timer_close_locked(struct snd_timer_instance *timeri, ++ struct device **card_devp_to_put) + { + struct snd_timer *timer = timeri->timer; + struct snd_timer_instance *slave, *tmp; +@@ -395,7 +401,7 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri) + timer->hw.close(timer); + /* release a card refcount for safe disconnection */ + if (timer->card) +- put_device(&timer->card->card_dev); ++ *card_devp_to_put = &timer->card->card_dev; + module_put(timer->module); + } + +@@ -407,14 +413,18 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri) + */ + int snd_timer_close(struct snd_timer_instance *timeri) + { ++ struct device *card_dev_to_put = NULL; + int err; + + if (snd_BUG_ON(!timeri)) + return -ENXIO; + + mutex_lock(®ister_mutex); +- err = snd_timer_close_locked(timeri); ++ err = snd_timer_close_locked(timeri, &card_dev_to_put); + mutex_unlock(®ister_mutex); ++ /* put_device() is called after unlock for avoiding deadlock */ ++ if (card_dev_to_put) ++ put_device(card_dev_to_put); + return err; + } + EXPORT_SYMBOL(snd_timer_close); +diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c +index 334dc7c96e1d..80ea162bf1a1 100644 +--- a/sound/firewire/bebob/bebob_stream.c ++++ b/sound/firewire/bebob/bebob_stream.c +@@ -252,8 +252,7 @@ end: + return err; + } + +-static unsigned int +-map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s) ++static int map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s) + { + unsigned int sec, sections, ch, channels; + unsigned int pcm, midi, location; +diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c +index 196bbc85699e..3b0110545070 100644 +--- a/sound/hda/hdac_controller.c ++++ b/sound/hda/hdac_controller.c +@@ -447,8 +447,6 @@ static void azx_int_disable(struct hdac_bus *bus) + list_for_each_entry(azx_dev, &bus->stream_list, list) + snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0); + +- synchronize_irq(bus->irq); +- + /* disable SIE for all streams */ + snd_hdac_chip_writeb(bus, INTCTL, 0); + +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 783f9a9c40ec..b0de3e3b33e5 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -1349,9 +1349,9 @@ static int azx_free(struct azx *chip) + } + + if (bus->chip_init) { +- azx_stop_chip(chip); + azx_clear_irq_pending(chip); + azx_stop_all_streams(chip); ++ azx_stop_chip(chip); + } + + if (bus->irq >= 0) +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 26249c607f2c..d4daa3c937ba 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -409,6 +409,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) + case 0x10ec0672: + alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */ + break; ++ case 0x10ec0623: ++ alc_update_coef_idx(codec, 0x19, 1<<13, 0); ++ break; + case 0x10ec0668: + alc_update_coef_idx(codec, 0x7, 3<<13, 0); + break; +@@ -2919,6 +2922,7 @@ enum { + ALC269_TYPE_ALC225, + ALC269_TYPE_ALC294, + ALC269_TYPE_ALC300, ++ ALC269_TYPE_ALC623, + ALC269_TYPE_ALC700, + }; + +@@ -2954,6 +2958,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec) + case ALC269_TYPE_ALC225: + case ALC269_TYPE_ALC294: + case ALC269_TYPE_ALC300: ++ case ALC269_TYPE_ALC623: + case ALC269_TYPE_ALC700: + ssids = alc269_ssids; + break; +@@ -7187,6 +7192,8 @@ static const struct snd_pci_quirk 
alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), + SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), + SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), ++ SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), ++ SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), + SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), + SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), +@@ -7974,6 +7981,9 @@ static int patch_alc269(struct hda_codec *codec) + spec->codec_variant = ALC269_TYPE_ALC300; + spec->gen.mixer_nid = 0; /* no loopback on ALC300 */ + break; ++ case 0x10ec0623: ++ spec->codec_variant = ALC269_TYPE_ALC623; ++ break; + case 0x10ec0700: + case 0x10ec0701: + case 0x10ec0703: +@@ -9101,6 +9111,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = { + HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0300, "ALC300", patch_alc269), ++ HDA_CODEC_ENTRY(0x10ec0623, "ALC623", patch_alc269), + HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861), + HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd), + HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861), +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c +index b6f7b13768a1..059b70313f35 100644 +--- a/sound/usb/quirks.c ++++ b/sound/usb/quirks.c +@@ -1563,7 +1563,8 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, + struct usb_interface *iface; + + /* Playback Designs */ +- if (USB_ID_VENDOR(chip->usb_id) == 0x23ba) { ++ if (USB_ID_VENDOR(chip->usb_id) == 0x23ba && ++ USB_ID_PRODUCT(chip->usb_id) < 0x0110) { + switch (fp->altsetting) { + case 1: + fp->dsd_dop = true; +@@ -1580,9 +1581,6 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, + /* XMOS based USB DACs */ + switch (chip->usb_id) { + case USB_ID(0x1511, 0x0037): /* AURALiC VEGA */ +- case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */ +- case USB_ID(0x22d9, 0x0436): /* OPPO Sonica */ +- case USB_ID(0x22d9, 0x0461): /* OPPO UDP-205 */ + case USB_ID(0x2522, 0x0012): /* LH Labs VI DAC Infinity */ + case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */ + if (fp->altsetting == 2) +@@ -1596,7 +1594,6 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, + case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */ + case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */ + case USB_ID(0x1db5, 0x0003): /* Bryston BDA3 */ +- case USB_ID(0x22d9, 0x0426): /* OPPO HA-2 */ + case USB_ID(0x22e1, 0xca01): /* HDTA Serenade DSD */ + case USB_ID(0x249c, 0x9326): /* M2Tech Young MkIII */ + case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */ +@@ -1651,9 +1648,13 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, + * from XMOS/Thesycon + */ + switch (USB_ID_VENDOR(chip->usb_id)) { +- case 0x20b1: /* XMOS based devices */ + case 0x152a: /* Thesycon devices */ ++ case 0x20b1: /* XMOS based devices */ ++ case 0x22d9: /* Oppo */ ++ case 0x23ba: /* Playback Designs */ + case 0x25ce: /* Mytek devices */ ++ case 0x278b: /* Rotel? 
*/ ++ case 0x292b: /* Gustard/Ess based devices */ + case 0x2ab6: /* T+A devices */ + case 0x3842: /* EVGA */ + case 0xc502: /* HiBy devices */ +diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile +index ed61fb3a46c0..5b2cd5e58df0 100644 +--- a/tools/lib/subcmd/Makefile ++++ b/tools/lib/subcmd/Makefile +@@ -20,7 +20,13 @@ MAKEFLAGS += --no-print-directory + LIBFILE = $(OUTPUT)libsubcmd.a + + CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) +-CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC ++CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -fPIC ++ ++ifeq ($(DEBUG),0) ++ ifeq ($(feature-fortify-source), 1) ++ CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 ++ endif ++endif + + ifeq ($(CC_NO_CLANG), 0) + CFLAGS += -O3 +diff --git a/tools/perf/arch/arm/annotate/instructions.c b/tools/perf/arch/arm/annotate/instructions.c +index c7d1a69b894f..19ac54758c71 100644 +--- a/tools/perf/arch/arm/annotate/instructions.c ++++ b/tools/perf/arch/arm/annotate/instructions.c +@@ -36,7 +36,7 @@ static int arm__annotate_init(struct arch *arch, char *cpuid __maybe_unused) + + arm = zalloc(sizeof(*arm)); + if (!arm) +- return -1; ++ return ENOMEM; + + #define ARM_CONDS "(cc|cs|eq|ge|gt|hi|le|ls|lt|mi|ne|pl|vc|vs)" + err = regcomp(&arm->call_insn, "^blx?" ARM_CONDS "?$", REG_EXTENDED); +@@ -58,5 +58,5 @@ out_free_call: + regfree(&arm->call_insn); + out_free_arm: + free(arm); +- return -1; ++ return SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP; + } +diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c +index 8f70a1b282df..223e2f161f41 100644 +--- a/tools/perf/arch/arm64/annotate/instructions.c ++++ b/tools/perf/arch/arm64/annotate/instructions.c +@@ -94,7 +94,7 @@ static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused) + + arm = zalloc(sizeof(*arm)); + if (!arm) +- return -1; ++ return ENOMEM; + + /* bl, blr */ + err = regcomp(&arm->call_insn, "^blr?$", REG_EXTENDED); +@@ -117,5 +117,5 @@ out_free_call: + regfree(&arm->call_insn); + out_free_arm: + free(arm); +- return -1; ++ return SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP; + } +diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c +index 0b242664f5ea..e46be9ef5a68 100644 +--- a/tools/perf/arch/powerpc/util/header.c ++++ b/tools/perf/arch/powerpc/util/header.c +@@ -1,5 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0 + #include ++#include + #include + #include + #include +@@ -31,7 +32,7 @@ get_cpuid(char *buffer, size_t sz) + buffer[nb-1] = '\0'; + return 0; + } +- return -1; ++ return ENOBUFS; + } + + char * +diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c +index 89bb8f2c54ce..a50e70baf918 100644 +--- a/tools/perf/arch/s390/annotate/instructions.c ++++ b/tools/perf/arch/s390/annotate/instructions.c +@@ -164,8 +164,10 @@ static int s390__annotate_init(struct arch *arch, char *cpuid __maybe_unused) + if (!arch->initialized) { + arch->initialized = true; + arch->associate_instruction_ops = s390__associate_ins_ops; +- if (cpuid) +- err = s390__cpuid_parse(arch, cpuid); ++ if (cpuid) { ++ if (s390__cpuid_parse(arch, cpuid)) ++ err = SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING; ++ } + } + + return err; +diff --git a/tools/perf/arch/s390/util/header.c b/tools/perf/arch/s390/util/header.c +index 8b0b018d896a..7933f6871c81 100644 +--- a/tools/perf/arch/s390/util/header.c ++++ b/tools/perf/arch/s390/util/header.c +@@ -8,6 +8,7 @@ + */ + + #include ++#include + #include 
+ #include + #include +@@ -54,7 +55,7 @@ int get_cpuid(char *buffer, size_t sz) + + sysinfo = fopen(SYSINFO, "r"); + if (sysinfo == NULL) +- return -1; ++ return errno; + + while ((read = getline(&line, &line_sz, sysinfo)) != -1) { + if (!strncmp(line, SYSINFO_MANU, strlen(SYSINFO_MANU))) { +@@ -89,7 +90,7 @@ int get_cpuid(char *buffer, size_t sz) + + /* Missing manufacturer, type or model information should not happen */ + if (!manufacturer[0] || !type[0] || !model[0]) +- return -1; ++ return EINVAL; + + /* + * Scan /proc/service_levels and return the CPU-MF counter facility +@@ -133,14 +134,14 @@ skip_sysinfo: + else + nbytes = snprintf(buffer, sz, "%s,%s,%s", manufacturer, type, + model); +- return (nbytes >= sz) ? -1 : 0; ++ return (nbytes >= sz) ? ENOBUFS : 0; + } + + char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused) + { + char *buf = malloc(128); + +- if (buf && get_cpuid(buf, 128) < 0) ++ if (buf && get_cpuid(buf, 128)) + zfree(&buf); + return buf; + } +diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c +index 44f5aba78210..7eb5621c021d 100644 +--- a/tools/perf/arch/x86/annotate/instructions.c ++++ b/tools/perf/arch/x86/annotate/instructions.c +@@ -196,8 +196,10 @@ static int x86__annotate_init(struct arch *arch, char *cpuid) + if (arch->initialized) + return 0; + +- if (cpuid) +- err = x86__cpuid_parse(arch, cpuid); ++ if (cpuid) { ++ if (x86__cpuid_parse(arch, cpuid)) ++ err = SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING; ++ } + + arch->initialized = true; + return err; +diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c +index af9a9f2600be..a089af60906a 100644 +--- a/tools/perf/arch/x86/util/header.c ++++ b/tools/perf/arch/x86/util/header.c +@@ -1,5 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0 + #include ++#include + #include + #include + #include +@@ -57,7 +58,7 @@ __get_cpuid(char *buffer, size_t sz, const char *fmt) + buffer[nb-1] = '\0'; + return 0; + } +- return -1; ++ return ENOBUFS; + } + + int +diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c +index b33c83489120..44ff3ea1da23 100644 +--- a/tools/perf/builtin-kvm.c ++++ b/tools/perf/builtin-kvm.c +@@ -699,14 +699,15 @@ static int process_sample_event(struct perf_tool *tool, + + static int cpu_isa_config(struct perf_kvm_stat *kvm) + { +- char buf[64], *cpuid; ++ char buf[128], *cpuid; + int err; + + if (kvm->live) { + err = get_cpuid(buf, sizeof(buf)); + if (err != 0) { +- pr_err("Failed to look up CPU type\n"); +- return err; ++ pr_err("Failed to look up CPU type: %s\n", ++ str_error_r(err, buf, sizeof(buf))); ++ return -err; + } + cpuid = buf; + } else +diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c +index 0140ddb8dd0b..c14a1cdad80c 100644 +--- a/tools/perf/builtin-script.c ++++ b/tools/perf/builtin-script.c +@@ -1054,7 +1054,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, + continue; + + insn = 0; +- for (off = 0;; off += ilen) { ++ for (off = 0; off < (unsigned)len; off += ilen) { + uint64_t ip = start + off; + + printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp); +@@ -1065,6 +1065,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, + printed += print_srccode(thread, x.cpumode, ip); + break; + } else { ++ ilen = 0; + printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip, + dump_insn(&x, ip, buffer + off, len - off, &ilen)); + if (ilen == 0) +@@ -1074,6 +1075,8 @@ static int 
perf_sample__fprintf_brstackinsn(struct perf_sample *sample, + insn++; + } + } ++ if (off != (unsigned)len) ++ printed += fprintf(fp, "\tmismatch of LBR data and executable\n"); + } + + /* +@@ -1114,6 +1117,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample, + goto out; + } + for (off = 0; off <= end - start; off += ilen) { ++ ilen = 0; + printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", start + off, + dump_insn(&x, start + off, buffer + off, len - off, &ilen)); + if (ilen == 0) +diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c +index d413761621b0..fa85e33762f7 100644 +--- a/tools/perf/pmu-events/jevents.c ++++ b/tools/perf/pmu-events/jevents.c +@@ -449,12 +449,12 @@ static struct fixed { + const char *name; + const char *event; + } fixed[] = { +- { "inst_retired.any", "event=0xc0" }, +- { "inst_retired.any_p", "event=0xc0" }, +- { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" }, +- { "cpu_clk_unhalted.thread", "event=0x3c" }, +- { "cpu_clk_unhalted.core", "event=0x3c" }, +- { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" }, ++ { "inst_retired.any", "event=0xc0,period=2000003" }, ++ { "inst_retired.any_p", "event=0xc0,period=2000003" }, ++ { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03,period=2000003" }, ++ { "cpu_clk_unhalted.thread", "event=0x3c,period=2000003" }, ++ { "cpu_clk_unhalted.core", "event=0x3c,period=2000003" }, ++ { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1,period=2000003" }, + { NULL, NULL}, + }; + +diff --git a/tools/perf/tests/perf-hooks.c b/tools/perf/tests/perf-hooks.c +index a693bcf017ea..44c16fd11bf6 100644 +--- a/tools/perf/tests/perf-hooks.c ++++ b/tools/perf/tests/perf-hooks.c +@@ -20,12 +20,11 @@ static void sigsegv_handler(int sig __maybe_unused) + static void the_hook(void *_hook_flags) + { + int *hook_flags = _hook_flags; +- int *p = NULL; + + *hook_flags = 1234; + + /* Generate a segfault, test perf_hooks__recover */ +- *p = 0; ++ raise(SIGSEGV); + } + + int test__perf_hooks(struct test *test __maybe_unused, int subtest __maybe_unused) +diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c +index 163536720149..2e02d2a0176a 100644 +--- a/tools/perf/util/annotate.c ++++ b/tools/perf/util/annotate.c +@@ -1625,6 +1625,19 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map * + case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF: + scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation"); + break; ++ case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP: ++ scnprintf(buf, buflen, "Problems with arch specific instruction name regular expressions."); ++ break; ++ case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING: ++ scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization."); ++ break; ++ case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE: ++ scnprintf(buf, buflen, "Invalid BPF file: %s.", dso->long_name); ++ break; ++ case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF: ++ scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.", ++ dso->long_name); ++ break; + default: + scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum); + break; +@@ -1656,7 +1669,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil + + build_id_path = strdup(filename); + if (!build_id_path) +- return -1; ++ return ENOMEM; + + /* + * old style build-id cache has name of XX/XXXXXXX.. 
while +@@ -1707,13 +1720,13 @@ static int symbol__disassemble_bpf(struct symbol *sym, + char tpath[PATH_MAX]; + size_t buf_size; + int nr_skip = 0; +- int ret = -1; + char *buf; + bfd *bfdf; ++ int ret; + FILE *s; + + if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO) +- return -1; ++ return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE; + + pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__, + sym->name, sym->start, sym->end - sym->start); +@@ -1726,8 +1739,10 @@ static int symbol__disassemble_bpf(struct symbol *sym, + assert(bfd_check_format(bfdf, bfd_object)); + + s = open_memstream(&buf, &buf_size); +- if (!s) ++ if (!s) { ++ ret = errno; + goto out; ++ } + init_disassemble_info(&info, s, + (fprintf_ftype) fprintf); + +@@ -1736,8 +1751,10 @@ static int symbol__disassemble_bpf(struct symbol *sym, + + info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, + dso->bpf_prog.id); +- if (!info_node) ++ if (!info_node) { ++ ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF; + goto out; ++ } + info_linear = info_node->info_linear; + sub_id = dso->bpf_prog.sub_id; + +@@ -2065,11 +2082,11 @@ int symbol__annotate(struct symbol *sym, struct map *map, + int err; + + if (!arch_name) +- return -1; ++ return errno; + + args.arch = arch = arch__find(arch_name); + if (arch == NULL) +- return -ENOTSUP; ++ return ENOTSUP; + + if (parch) + *parch = arch; +@@ -2965,7 +2982,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev + + notes->offsets = zalloc(size * sizeof(struct annotation_line *)); + if (notes->offsets == NULL) +- return -1; ++ return ENOMEM; + + if (perf_evsel__is_group_event(evsel)) + nr_pcnt = evsel->nr_members; +@@ -2991,7 +3008,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev + + out_free_offsets: + zfree(¬es->offsets); +- return -1; ++ return err; + } + + #define ANNOTATION__CFG(n) \ +diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h +index 5bc0cf655d37..2004e2cf0211 100644 +--- a/tools/perf/util/annotate.h ++++ b/tools/perf/util/annotate.h +@@ -370,6 +370,10 @@ enum symbol_disassemble_errno { + + SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX = __SYMBOL_ANNOTATE_ERRNO__START, + SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF, ++ SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING, ++ SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP, ++ SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE, ++ SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF, + + __SYMBOL_ANNOTATE_ERRNO__END, + }; +diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c +index 7666206d06fa..f18113581cf0 100644 +--- a/tools/perf/util/map.c ++++ b/tools/perf/util/map.c +@@ -1,5 +1,6 @@ + // SPDX-License-Identifier: GPL-2.0 + #include "symbol.h" ++#include + #include + #include + #include +@@ -847,6 +848,8 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp + } + + after->start = map->end; ++ after->pgoff += map->end - pos->start; ++ assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end)); + __map_groups__insert(pos->groups, after); + if (verbose >= 2 && !use_browser) + map__fprintf(after, fp); +diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile +index 25b43a8c2b15..1779923d7a7b 100644 +--- a/tools/testing/selftests/Makefile ++++ b/tools/testing/selftests/Makefile +@@ -198,8 +198,12 @@ ifdef INSTALL_PATH + echo " cat /dev/null > \$$logfile" >> $(ALL_SCRIPT) + echo "fi" >> $(ALL_SCRIPT) + ++ @# While building run_kselftest.sh skip also non-existent TARGET dirs: ++ @# they could be the result 
of a build failure and should NOT be ++ @# included in the generated runlist. + for TARGET in $(TARGETS); do \ + BUILD_TARGET=$$BUILD/$$TARGET; \ ++ [ ! -d $$INSTALL_PATH/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \ + echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \ + echo "cd $$TARGET" >> $(ALL_SCRIPT); \ + echo -n "run_many" >> $(ALL_SCRIPT); \ +diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh +index 00c9020bdda8..84de7bc74f2c 100644 +--- a/tools/testing/selftests/kselftest/runner.sh ++++ b/tools/testing/selftests/kselftest/runner.sh +@@ -3,9 +3,14 @@ + # + # Runs a set of tests in a given subdirectory. + export skip_rc=4 ++export timeout_rc=124 + export logfile=/dev/stdout + export per_test_logging= + ++# Defaults for "settings" file fields: ++# "timeout" how many seconds to let each test run before failing. ++export kselftest_default_timeout=45 ++ + # There isn't a shell-agnostic way to find the path of a sourced file, + # so we must rely on BASE_DIR being set to find other tools. + if [ -z "$BASE_DIR" ]; then +@@ -24,6 +29,16 @@ tap_prefix() + fi + } + ++tap_timeout() ++{ ++ # Make sure tests will time out if utility is available. ++ if [ -x /usr/bin/timeout ] ; then ++ /usr/bin/timeout "$kselftest_timeout" "$1" ++ else ++ "$1" ++ fi ++} ++ + run_one() + { + DIR="$1" +@@ -32,6 +47,18 @@ run_one() + + BASENAME_TEST=$(basename $TEST) + ++ # Reset any "settings"-file variables. ++ export kselftest_timeout="$kselftest_default_timeout" ++ # Load per-test-directory kselftest "settings" file. ++ settings="$BASE_DIR/$DIR/settings" ++ if [ -r "$settings" ] ; then ++ while read line ; do ++ field=$(echo "$line" | cut -d= -f1) ++ value=$(echo "$line" | cut -d= -f2-) ++ eval "kselftest_$field"="$value" ++ done < "$settings" ++ fi ++ + TEST_HDR_MSG="selftests: $DIR: $BASENAME_TEST" + echo "# $TEST_HDR_MSG" + if [ ! -x "$TEST" ]; then +@@ -44,14 +71,17 @@ run_one() + echo "not ok $test_num $TEST_HDR_MSG" + else + cd `dirname $TEST` > /dev/null +- (((((./$BASENAME_TEST 2>&1; echo $? >&3) | ++ ((((( tap_timeout ./$BASENAME_TEST 2>&1; echo $? >&3) | + tap_prefix >&4) 3>&1) | + (read xs; exit $xs)) 4>>"$logfile" && + echo "ok $test_num $TEST_HDR_MSG") || +- (if [ $? -eq $skip_rc ]; then \ ++ (rc=$?; \ ++ if [ $rc -eq $skip_rc ]; then \ + echo "not ok $test_num $TEST_HDR_MSG # SKIP" ++ elif [ $rc -eq $timeout_rc ]; then \ ++ echo "not ok $test_num $TEST_HDR_MSG # TIMEOUT" + else +- echo "not ok $test_num $TEST_HDR_MSG" ++ echo "not ok $test_num $TEST_HDR_MSG # exit=$rc" + fi) + cd - >/dev/null + fi +diff --git a/tools/testing/selftests/rtc/settings b/tools/testing/selftests/rtc/settings +new file mode 100644 +index 000000000000..ba4d85f74cd6 +--- /dev/null ++++ b/tools/testing/selftests/rtc/settings +@@ -0,0 +1 @@ ++timeout=90
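
Looking back at the ocfs2_dio_wr_get_block() hunk earlier in this patch: the added clamp maps blocks inside i_size and blocks beyond it in separate calls, so only the beyond-EOF mapping is marked with buffer_new. A standalone sketch of the clamp arithmetic, with hypothetical inode and request sizes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned int i_blkbits = 12;            /* 4 KiB blocks */
    uint64_t i_size = 10000;                /* file size in bytes */
    uint64_t iblock = 1;                    /* first block of the request */
    uint64_t len = 4 << i_blkbits;          /* requested mapping length */

    /* last block that still contains file data */
    uint64_t endblk = (i_size - 1) >> i_blkbits;

    /* request starts inside the file but runs past EOF: truncate it
     * at EOF; the caller re-issues the tail, which is then mapped
     * with the NEW flag set */
    if (iblock <= endblk &&
        iblock + ((len - 1) >> i_blkbits) > endblk)
        len = (endblk - iblock + 1) << i_blkbits;

    printf("endblk=%llu clamped len=%llu\n",
           (unsigned long long)endblk, (unsigned long long)len);
    return 0;
}
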
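
Likewise, the llc_conn/llc_sap changes earlier in this patch settle skb ownership: the send paths now always consume one reference, so a caller that keeps using the skb afterwards takes its own reference via skb_get() first. A toy reference-counting model of that contract — the struct and helpers are stand-ins, not the real skb API:

#include <stdio.h>
#include <stdlib.h>

struct buf { int refs; };

static struct buf *buf_get(struct buf *b) { b->refs++; return b; }

static void buf_put(struct buf *b)
{
    if (--b->refs == 0) {
        printf("freed\n");
        free(b);
    }
}

/* like llc_conn_send_pdu() after the fix: always consumes one reference */
static void send_buf(struct buf *b)
{
    printf("transmitted (refs=%d)\n", b->refs);
    buf_put(b);
}

int main(void)
{
    struct buf *b = calloc(1, sizeof(*b));
    if (!b)
        return 1;
    b->refs = 1;

    buf_get(b);                          /* caller will use b after send */
    send_buf(b);                         /* consumes the reference handed in */
    printf("caller refs=%d\n", b->refs); /* still valid: refs == 1 */
    buf_put(b);                          /* drop the caller's own reference */
    return 0;
}
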