From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:6.6 commit in: /
Date: Wed, 18 Sep 2024 18:03:03 +0000 (UTC)
Message-ID: <1726682572.d680fe6b99bd33137e4e0953dc99afe62972f09a.mpagano@gentoo>
commit: d680fe6b99bd33137e4e0953dc99afe62972f09a
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 18 18:02:52 2024 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 18 18:02:52 2024 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d680fe6b
Linux patch 6.6.52
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1051_linux-6.6.52.patch | 2980 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2984 insertions(+)
diff --git a/0000_README b/0000_README
index fb6aa265..564b03d6 100644
--- a/0000_README
+++ b/0000_README
@@ -247,6 +247,10 @@ Patch: 1050_linux-6.6.51.patch
From: https://www.kernel.org
Desc: Linux 6.6.51
+Patch: 1051_linux-6.6.52.patch
+From: https://www.kernel.org
+Desc: Linux 6.6.52
+
Patch: 1510_fs-enable-link-security-restrictions-by-default.patch
From: http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
Desc: Enable link security restrictions by default.
diff --git a/1051_linux-6.6.52.patch b/1051_linux-6.6.52.patch
new file mode 100644
index 00000000..21206022
--- /dev/null
+++ b/1051_linux-6.6.52.patch
@@ -0,0 +1,2980 @@
+diff --git a/Makefile b/Makefile
+index 6dea0c21636820..5b22e3ff440ca1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 51
++SUBLEVEL = 52
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+index d9905a08c6ce86..66443d52cd34d8 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+@@ -332,7 +332,7 @@ led_pin: led-pin {
+
+ pmic {
+ pmic_int_l: pmic-int-l {
+- rockchip,pins = <2 RK_PA6 RK_FUNC_GPIO &pcfg_pull_up>;
++ rockchip,pins = <0 RK_PA2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+index f2ca5d30d223ce..aba2748fe54c77 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+@@ -119,6 +119,22 @@ &emmc_phy {
+ drive-impedance-ohm = <33>;
+ };
+
++&gpio3 {
++ /*
++ * The Qseven BIOS_DISABLE signal on the RK3399-Q7 keeps the on-module
++ * eMMC and SPI flash powered-down initially (in fact it keeps the
++ * reset signal asserted). The BIOS_DISABLE_OVERRIDE pin allows overriding
++ * that signal so that eMMC and SPI can be used regardless of the state
++ * of the signal.
++ */
++ bios-disable-override-hog {
++ gpios = <RK_PD5 GPIO_ACTIVE_LOW>;
++ gpio-hog;
++ line-name = "bios_disable_override";
++ output-high;
++ };
++};
++
+ &gmac {
+ assigned-clocks = <&cru SCLK_RMII_SRC>;
+ assigned-clock-parents = <&clkin_gmac>;
+@@ -374,6 +390,7 @@ vdd_cpu_b: regulator@60 {
+
+ &i2s0 {
+ pinctrl-0 = <&i2s0_2ch_bus>;
++ pinctrl-1 = <&i2s0_2ch_bus_bclk_off>;
+ rockchip,playback-channels = <2>;
+ rockchip,capture-channels = <2>;
+ status = "okay";
+@@ -382,8 +399,8 @@ &i2s0 {
+ /*
+ * As Q7 specifies neither a global nor an RX clock for I2S, these
+ * signals are not used. Furthermore I2S0_LRCK_RX is used as GPIO.
+- * Therefore we have to redefine the i2s0_2ch_bus definition to prevent
+- * conflicts.
++ * Therefore we have to redefine the i2s0_2ch_bus and i2s0_2ch_bus_bclk_off
++ * definitions to prevent conflicts.
+ */
+ &i2s0_2ch_bus {
+ rockchip,pins =
+@@ -393,6 +410,14 @@ &i2s0_2ch_bus {
+ <3 RK_PD7 1 &pcfg_pull_none>;
+ };
+
++&i2s0_2ch_bus_bclk_off {
++ rockchip,pins =
++ <3 RK_PD0 RK_FUNC_GPIO &pcfg_pull_none>,
++ <3 RK_PD2 1 &pcfg_pull_none>,
++ <3 RK_PD3 1 &pcfg_pull_none>,
++ <3 RK_PD7 1 &pcfg_pull_none>;
++};
++
+ &io_domains {
+ status = "okay";
+ bt656-supply = <&vcc_1v8>;
+@@ -408,9 +433,14 @@ &pcie_clkreqn_cpm {
+
+ &pinctrl {
+ pinctrl-names = "default";
+- pinctrl-0 = <&q7_thermal_pin>;
++ pinctrl-0 = <&q7_thermal_pin &bios_disable_override_hog_pin>;
+
+ gpios {
++ bios_disable_override_hog_pin: bios-disable-override-hog-pin {
++ rockchip,pins =
++ <3 RK_PD5 RK_FUNC_GPIO &pcfg_pull_down>;
++ };
++
+ q7_thermal_pin: q7-thermal-pin {
+ rockchip,pins =
+ <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index 20f72cd1d8138d..03eaad5949f141 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -950,6 +950,7 @@ void __init setup_arch(char **cmdline_p)
+ mem_topology_setup();
+ /* Set max_mapnr before paging_init() */
+ set_max_mapnr(max_pfn);
++ high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
+
+ /*
+ * Release secondary cpus out of their spinloops at 0x60 now that
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 07e8f4f1e07f89..9dbef559af4cbf 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -287,8 +287,6 @@ void __init mem_init(void)
+ swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
+ #endif
+
+- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+-
+ kasan_late_init();
+
+ memblock_free_all();
+diff --git a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
+index 062b97c6e7dff4..4874e3bb42ab10 100644
+--- a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
++++ b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
+@@ -204,6 +204,8 @@ &i2c6 {
+
+ &mmc0 {
+ max-frequency = <100000000>;
++ assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO0_SDCARD>;
++ assigned-clock-rates = <50000000>;
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ mmc-ddr-1_8v;
+@@ -220,6 +222,8 @@ &mmc0 {
+
+ &mmc1 {
+ max-frequency = <100000000>;
++ assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO1_SDCARD>;
++ assigned-clock-rates = <50000000>;
+ bus-width = <4>;
+ no-sdio;
+ no-mmc;
+diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
+index 8f3a4d16bb791f..d1e2d12279e268 100644
+--- a/arch/x86/hyperv/hv_init.c
++++ b/arch/x86/hyperv/hv_init.c
+@@ -35,7 +35,6 @@
+ #include <clocksource/hyperv_timer.h>
+ #include <linux/highmem.h>
+
+-int hyperv_init_cpuhp;
+ u64 hv_current_partition_id = ~0ull;
+ EXPORT_SYMBOL_GPL(hv_current_partition_id);
+
+@@ -607,8 +606,6 @@ void __init hyperv_init(void)
+
+ register_syscore_ops(&hv_syscore_ops);
+
+- hyperv_init_cpuhp = cpuhp;
+-
+ if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_ACCESS_PARTITION_ID)
+ hv_get_partition_id();
+
+@@ -637,7 +634,7 @@ void __init hyperv_init(void)
+ clean_guest_os_id:
+ wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
+ hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
+- cpuhp_remove_state(cpuhp);
++ cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
+ free_ghcb_page:
+ free_percpu(hv_ghcb_pg);
+ free_vp_assist_page:
+diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
+index 896445edc6a8e9..ec95d6e9f1682c 100644
+--- a/arch/x86/include/asm/mshyperv.h
++++ b/arch/x86/include/asm/mshyperv.h
+@@ -40,7 +40,6 @@ static inline unsigned char hv_get_nmi_reason(void)
+ }
+
+ #if IS_ENABLED(CONFIG_HYPERV)
+-extern int hyperv_init_cpuhp;
+ extern bool hyperv_paravisor_present;
+
+ extern void *hv_hypercall_pg;
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index e6bba12c759cb7..fac4b4116efe10 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -199,8 +199,8 @@ static void hv_machine_shutdown(void)
+ * Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor
+ * corrupts the old VP Assist Pages and can crash the kexec kernel.
+ */
+- if (kexec_in_progress && hyperv_init_cpuhp > 0)
+- cpuhp_remove_state(hyperv_init_cpuhp);
++ if (kexec_in_progress)
++ cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
+
+ /* The function calls stop_other_cpus(). */
+ native_machine_shutdown();
+diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
+index 59c7f88b915a43..edb46123e3eb0f 100644
+--- a/drivers/cxl/cxlmem.h
++++ b/drivers/cxl/cxlmem.h
+@@ -538,7 +538,7 @@ enum cxl_opcode {
+ 0x3b, 0x3f, 0x17)
+
+ #define DEFINE_CXL_VENDOR_DEBUG_UUID \
+- UUID_INIT(0xe1819d9, 0x11a9, 0x400c, 0x81, 0x1f, 0xd6, 0x07, 0x19, \
++ UUID_INIT(0x5e1819d9, 0x11a9, 0x400c, 0x81, 0x1f, 0xd6, 0x07, 0x19, \
+ 0x40, 0x3d, 0x86)
+
+ struct cxl_mbox_get_supported_logs {
+diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
+index ee899f8e67215f..bea7e574f916e1 100644
+--- a/drivers/dma-buf/heaps/cma_heap.c
++++ b/drivers/dma-buf/heaps/cma_heap.c
+@@ -165,7 +165,7 @@ static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
+ struct vm_area_struct *vma = vmf->vma;
+ struct cma_heap_buffer *buffer = vma->vm_private_data;
+
+- if (vmf->pgoff > buffer->pagecount)
++ if (vmf->pgoff >= buffer->pagecount)
+ return VM_FAULT_SIGBUS;
+
+ vmf->page = buffer->pages[vmf->pgoff];
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+index 77595e9622da34..7ac0228fe532ee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+@@ -23,6 +23,7 @@
+
+ #include "amdgpu.h"
+ #include "amdgpu_jpeg.h"
++#include "amdgpu_cs.h"
+ #include "soc15.h"
+ #include "soc15d.h"
+ #include "vcn_v1_0.h"
+@@ -34,6 +35,9 @@
+ static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
+ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+ static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring);
++static int jpeg_v1_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
++ struct amdgpu_job *job,
++ struct amdgpu_ib *ib);
+
+ static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
+ {
+@@ -300,7 +304,10 @@ static void jpeg_v1_0_decode_ring_emit_ib(struct amdgpu_ring *ring,
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
+- amdgpu_ring_write(ring, (vmid | (vmid << 4)));
++ if (ring->funcs->parse_cs)
++ amdgpu_ring_write(ring, 0);
++ else
++ amdgpu_ring_write(ring, (vmid | (vmid << 4)));
+
+ amdgpu_ring_write(ring,
+ PACKETJ(SOC15_REG_OFFSET(JPEG, 0, mmUVD_LMI_JPEG_VMID), 0, 0, PACKETJ_TYPE0));
+@@ -554,6 +561,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
+ .get_rptr = jpeg_v1_0_decode_ring_get_rptr,
+ .get_wptr = jpeg_v1_0_decode_ring_get_wptr,
+ .set_wptr = jpeg_v1_0_decode_ring_set_wptr,
++ .parse_cs = jpeg_v1_dec_ring_parse_cs,
+ .emit_frame_size =
+ 6 + 6 + /* hdp invalidate / flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+@@ -612,3 +620,69 @@ static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+
+ vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
+ }
++
++/**
++ * jpeg_v1_dec_ring_parse_cs - command submission parser
++ *
++ * @parser: Command submission parser context
++ * @job: the job to parse
++ * @ib: the IB to parse
++ *
++ * Parse the command stream, return -EINVAL for invalid packet,
++ * 0 otherwise
++ */
++static int jpeg_v1_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
++ struct amdgpu_job *job,
++ struct amdgpu_ib *ib)
++{
++ u32 i, reg, res, cond, type;
++ int ret = 0;
++ struct amdgpu_device *adev = parser->adev;
++
++ for (i = 0; i < ib->length_dw ; i += 2) {
++ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
++ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
++ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
++ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
++
++ if (res || cond != PACKETJ_CONDITION_CHECK0) /* only allow 0 for now */
++ return -EINVAL;
++
++ if (reg >= JPEG_V1_REG_RANGE_START && reg <= JPEG_V1_REG_RANGE_END)
++ continue;
++
++ switch (type) {
++ case PACKETJ_TYPE0:
++ if (reg != JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_HIGH &&
++ reg != JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_LOW &&
++ reg != JPEG_V1_LMI_JPEG_READ_64BIT_BAR_HIGH &&
++ reg != JPEG_V1_LMI_JPEG_READ_64BIT_BAR_LOW &&
++ reg != JPEG_V1_REG_CTX_INDEX &&
++ reg != JPEG_V1_REG_CTX_DATA) {
++ ret = -EINVAL;
++ }
++ break;
++ case PACKETJ_TYPE1:
++ if (reg != JPEG_V1_REG_CTX_DATA)
++ ret = -EINVAL;
++ break;
++ case PACKETJ_TYPE3:
++ if (reg != JPEG_V1_REG_SOFT_RESET)
++ ret = -EINVAL;
++ break;
++ case PACKETJ_TYPE6:
++ if (ib->ptr[i] != CP_PACKETJ_NOP)
++ ret = -EINVAL;
++ break;
++ default:
++ ret = -EINVAL;
++ }
++
++ if (ret) {
++ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
++ break;
++ }
++ }
++
++ return ret;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
+index bbf33a6a397298..9654d22e03763c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h
+@@ -29,4 +29,15 @@ int jpeg_v1_0_sw_init(void *handle);
+ void jpeg_v1_0_sw_fini(void *handle);
+ void jpeg_v1_0_start(struct amdgpu_device *adev, int mode);
+
++#define JPEG_V1_REG_RANGE_START 0x8000
++#define JPEG_V1_REG_RANGE_END 0x803f
++
++#define JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_HIGH 0x8238
++#define JPEG_V1_LMI_JPEG_WRITE_64BIT_BAR_LOW 0x8239
++#define JPEG_V1_LMI_JPEG_READ_64BIT_BAR_HIGH 0x825a
++#define JPEG_V1_LMI_JPEG_READ_64BIT_BAR_LOW 0x825b
++#define JPEG_V1_REG_CTX_INDEX 0x8328
++#define JPEG_V1_REG_CTX_DATA 0x8329
++#define JPEG_V1_REG_SOFT_RESET 0x83a0
++
+ #endif /*__JPEG_V1_0_H__*/
+diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+index 0050e0a06cbc24..9bde0c8bf914a6 100644
+--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
++++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+@@ -143,32 +143,25 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
++ if (link_enc->funcs->fec_set_ready == NULL)
++ return DC_NOT_SUPPORTED;
+
+- if (!dp_should_enable_fec(link))
+- return status;
+-
+- if (link_enc->funcs->fec_set_ready &&
+- link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+- if (ready) {
+- fec_config = 1;
+- status = core_link_write_dpcd(link,
+- DP_FEC_CONFIGURATION,
+- &fec_config,
+- sizeof(fec_config));
+- if (status == DC_OK) {
+- link_enc->funcs->fec_set_ready(link_enc, true);
+- link->fec_state = dc_link_fec_ready;
+- } else {
+- link_enc->funcs->fec_set_ready(link_enc, false);
+- link->fec_state = dc_link_fec_not_ready;
+- dm_error("dpcd write failed to set fec_ready");
+- }
+- } else if (link->fec_state == dc_link_fec_ready) {
++ if (ready && dp_should_enable_fec(link)) {
++ fec_config = 1;
++
++ status = core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
++ &fec_config, sizeof(fec_config));
++
++ if (status == DC_OK) {
++ link_enc->funcs->fec_set_ready(link_enc, true);
++ link->fec_state = dc_link_fec_ready;
++ }
++ } else {
++ if (link->fec_state == dc_link_fec_ready) {
+ fec_config = 0;
+- status = core_link_write_dpcd(link,
+- DP_FEC_CONFIGURATION,
+- &fec_config,
+- sizeof(fec_config));
++ core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
++ &fec_config, sizeof(fec_config));
++
+ link_enc->funcs->fec_set_ready(link_enc, false);
+ link->fec_state = dc_link_fec_not_ready;
+ }
+@@ -183,14 +176,12 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+-
+- if (!dp_should_enable_fec(link))
++ if (link_enc->funcs->fec_set_enable == NULL)
+ return;
+
+- if (link_enc->funcs->fec_set_enable &&
+- link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+- if (link->fec_state == dc_link_fec_ready && enable) {
+- /* Accord to DP spec, FEC enable sequence can first
++ if (enable && dp_should_enable_fec(link)) {
++ if (link->fec_state == dc_link_fec_ready) {
++ /* According to DP spec, FEC enable sequence can first
+ * be transmitted anytime after 1000 LL codes have
+ * been transmitted on the link after link training
+ * completion. Using 1 lane RBR should have the maximum
+@@ -200,7 +191,9 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
+ udelay(7);
+ link_enc->funcs->fec_set_enable(link_enc, true);
+ link->fec_state = dc_link_fec_enabled;
+- } else if (link->fec_state == dc_link_fec_enabled && !enable) {
++ }
++ } else {
++ if (link->fec_state == dc_link_fec_enabled) {
+ link_enc->funcs->fec_set_enable(link_enc, false);
+ link->fec_state = dc_link_fec_ready;
+ }
+diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
+index 6c8b4afa7cc6eb..ccc79bdd4f5adf 100644
+--- a/drivers/gpu/drm/amd/include/atomfirmware.h
++++ b/drivers/gpu/drm/amd/include/atomfirmware.h
+@@ -1006,7 +1006,7 @@ struct display_object_info_table_v1_4
+ uint16_t supporteddevices;
+ uint8_t number_of_path;
+ uint8_t reserved;
+- struct atom_display_object_path_v2 display_path[8]; //the real number of this included in the structure is calculated by using the (whole structure size - the header size- number_of_path)/size of atom_display_object_path
++ struct atom_display_object_path_v2 display_path[]; //the real number of this included in the structure is calculated by using the (whole structure size - the header size- number_of_path)/size of atom_display_object_path
+ };
+
+ struct display_object_info_table_v1_5 {
+@@ -1016,7 +1016,7 @@ struct display_object_info_table_v1_5 {
+ uint8_t reserved;
+ // the real number of this included in the structure is calculated by using the
+ // (whole structure size - the header size- number_of_path)/size of atom_display_object_path
+- struct atom_display_object_path_v3 display_path[8];
++ struct atom_display_object_path_v3 display_path[];
+ };
+
+ /*
+diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+index 039da0d1a613b7..5b2506c65e9520 100644
+--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
++++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
+@@ -208,6 +208,18 @@ static const struct dmi_system_id orientation_data[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "KUN"),
+ },
+ .driver_data = (void *)&lcd1600x2560_rightside_up,
++ }, { /* AYN Loki Max */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Loki Max"),
++ },
++ .driver_data = (void *)&lcd1080x1920_leftside_up,
++ }, { /* AYN Loki Zero */
++ .matches = {
++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Loki Zero"),
++ },
++ .driver_data = (void *)&lcd1080x1920_leftside_up,
+ }, { /* Chuwi HiBook (CWI514) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
+index 5860428da8de87..7b4ed5ca0a9bd2 100644
+--- a/drivers/gpu/drm/drm_syncobj.c
++++ b/drivers/gpu/drm/drm_syncobj.c
+@@ -1421,6 +1421,7 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+ struct drm_syncobj *syncobj;
+ struct eventfd_ctx *ev_fd_ctx;
+ struct syncobj_eventfd_entry *entry;
++ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+@@ -1436,13 +1437,15 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+ return -ENOENT;
+
+ ev_fd_ctx = eventfd_ctx_fdget(args->fd);
+- if (IS_ERR(ev_fd_ctx))
+- return PTR_ERR(ev_fd_ctx);
++ if (IS_ERR(ev_fd_ctx)) {
++ ret = PTR_ERR(ev_fd_ctx);
++ goto err_fdget;
++ }
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+- eventfd_ctx_put(ev_fd_ctx);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto err_kzalloc;
+ }
+ entry->syncobj = syncobj;
+ entry->ev_fd_ctx = ev_fd_ctx;
+@@ -1453,6 +1456,12 @@ drm_syncobj_eventfd_ioctl(struct drm_device *dev, void *data,
+ drm_syncobj_put(syncobj);
+
+ return 0;
++
++err_kzalloc:
++ eventfd_ctx_put(ev_fd_ctx);
++err_fdget:
++ drm_syncobj_put(syncobj);
++ return ret;
+ }
+
+ int
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+index b5de5a9f596715..236dfff81fea43 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+@@ -2695,9 +2695,9 @@ static void prepare_context_registration_info_v70(struct intel_context *ce,
+ ce->parallel.guc.wqi_tail = 0;
+ ce->parallel.guc.wqi_head = 0;
+
+- wq_desc_offset = i915_ggtt_offset(ce->state) +
++ wq_desc_offset = (u64)i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce);
+- wq_base_offset = i915_ggtt_offset(ce->state) +
++ wq_base_offset = (u64)i915_ggtt_offset(ce->state) +
+ __get_wq_offset(ce);
+ info->wq_desc_lo = lower_32_bits(wq_desc_offset);
+ info->wq_desc_hi = upper_32_bits(wq_desc_offset);
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index 8090dde0328082..96deaf85c0cd27 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -99,7 +99,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
+ * was a bad idea, and is only provided for backwards
+ * compatibility for older targets.
+ */
+- return -ENODEV;
++ return -ENOENT;
+ }
+
+ if (IS_ERR(fw)) {
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+index 50f0c1914f58e8..4c3f7439657987 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+@@ -46,6 +46,8 @@ u32 gm107_ram_probe_fbp(const struct nvkm_ram_func *,
+ u32 gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
+ struct nvkm_device *, int, int *);
+
++int gp100_ram_init(struct nvkm_ram *);
++
+ /* RAM type-specific MR calculation routines */
+ int nvkm_sddr2_calc(struct nvkm_ram *);
+ int nvkm_sddr3_calc(struct nvkm_ram *);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+index 378f6fb7099077..8987a21e81d174 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+@@ -27,7 +27,7 @@
+ #include <subdev/bios/init.h>
+ #include <subdev/bios/rammap.h>
+
+-static int
++int
+ gp100_ram_init(struct nvkm_ram *ram)
+ {
+ struct nvkm_subdev *subdev = &ram->fb->subdev;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c
+index 8550f5e473474b..b6b6ee59019d70 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp102.c
+@@ -5,6 +5,7 @@
+
+ static const struct nvkm_ram_func
+ gp102_ram = {
++ .init = gp100_ram_init,
+ };
+
+ int
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 4246348ca16e99..a5987fafbedde4 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -515,6 +515,8 @@
+ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
+
+ #define I2C_VENDOR_ID_GOODIX 0x27c6
++#define I2C_DEVICE_ID_GOODIX_01E8 0x01e8
++#define I2C_DEVICE_ID_GOODIX_01E9 0x01e9
+ #define I2C_DEVICE_ID_GOODIX_01F0 0x01f0
+
+ #define USB_VENDOR_ID_GOODTOUCH 0x1aad
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 17efe6e2a1a44c..8ef41d6e71d421 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -1442,6 +1442,30 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
+ return 0;
+ }
+
++static __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc,
++ unsigned int *size)
++{
++ if (hdev->vendor == I2C_VENDOR_ID_GOODIX &&
++ (hdev->product == I2C_DEVICE_ID_GOODIX_01E8 ||
++ hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) {
++ if (rdesc[607] == 0x15) {
++ rdesc[607] = 0x25;
++ dev_info(
++ &hdev->dev,
++ "GT7868Q report descriptor fixup is applied.\n");
++ } else {
++ dev_info(
++ &hdev->dev,
++ "The byte is not expected for fixing the report descriptor. \
++It's possible that the touchpad firmware is not suitable for applying the fix. \
++got: %x\n",
++ rdesc[607]);
++ }
++ }
++
++ return rdesc;
++}
++
+ static void mt_report(struct hid_device *hid, struct hid_report *report)
+ {
+ struct mt_device *td = hid_get_drvdata(hid);
+@@ -2038,6 +2062,14 @@ static const struct hid_device_id mt_devices[] = {
+ MT_BT_DEVICE(USB_VENDOR_ID_FRUCTEL,
+ USB_DEVICE_ID_GAMETEL_MT_MODE) },
+
++ /* Goodix GT7868Q devices */
++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
++ HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
++ I2C_DEVICE_ID_GOODIX_01E8) },
++ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU,
++ HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX,
++ I2C_DEVICE_ID_GOODIX_01E8) },
++
+ /* GoodTouch panels */
+ { .driver_data = MT_CLS_NSMU,
+ MT_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
+@@ -2273,6 +2305,7 @@ static struct hid_driver mt_driver = {
+ .feature_mapping = mt_feature_mapping,
+ .usage_table = mt_grabbed_usages,
+ .event = mt_event,
++ .report_fixup = mt_report_fixup,
+ .report = mt_report,
+ #ifdef CONFIG_PM
+ .suspend = mt_suspend,
+diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
+index b0832a4c690d7f..76c2b364c3fe40 100644
+--- a/drivers/hwmon/pmbus/pmbus.h
++++ b/drivers/hwmon/pmbus/pmbus.h
+@@ -409,6 +409,12 @@ enum pmbus_sensor_classes {
+ enum pmbus_data_format { linear = 0, ieee754, direct, vid };
+ enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv };
+
++/* PMBus revision identifiers */
++#define PMBUS_REV_10 0x00 /* PMBus revision 1.0 */
++#define PMBUS_REV_11 0x11 /* PMBus revision 1.1 */
++#define PMBUS_REV_12 0x22 /* PMBus revision 1.2 */
++#define PMBUS_REV_13 0x33 /* PMBus revision 1.3 */
++
+ struct pmbus_driver_info {
+ int pages; /* Total number of pages */
+ u8 phases[PMBUS_PAGES]; /* Number of phases per page */
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index 1363d9f89181d2..728c07c42651ce 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -85,6 +85,8 @@ struct pmbus_data {
+
+ u32 flags; /* from platform data */
+
++ u8 revision; /* The PMBus revision the device is compliant with */
++
+ int exponent[PMBUS_PAGES];
+ /* linear mode: exponent for output voltages */
+
+@@ -1095,9 +1097,14 @@ static int pmbus_get_boolean(struct i2c_client *client, struct pmbus_boolean *b,
+
+ regval = status & mask;
+ if (regval) {
+- ret = _pmbus_write_byte_data(client, page, reg, regval);
+- if (ret)
+- goto unlock;
++ if (data->revision >= PMBUS_REV_12) {
++ ret = _pmbus_write_byte_data(client, page, reg, regval);
++ if (ret)
++ goto unlock;
++ } else {
++ pmbus_clear_fault_page(client, page);
++ }
++
+ }
+ if (s1 && s2) {
+ s64 v1, v2;
+@@ -2640,6 +2647,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
+ data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK;
+ }
+
++ ret = i2c_smbus_read_byte_data(client, PMBUS_REVISION);
++ if (ret >= 0)
++ data->revision = ret;
++
+ if (data->info->pages)
+ pmbus_clear_faults(client);
+ else
+diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
+index 2976c62b58c075..d2fe0269b6d3af 100644
+--- a/drivers/iio/adc/ad7124.c
++++ b/drivers/iio/adc/ad7124.c
+@@ -14,7 +14,8 @@
+ #include <linux/kernel.h>
+ #include <linux/kfifo.h>
+ #include <linux/module.h>
+-#include <linux/of.h>
++#include <linux/mod_devicetable.h>
++#include <linux/property.h>
+ #include <linux/regulator/consumer.h>
+ #include <linux/spi/spi.h>
+
+@@ -812,22 +813,19 @@ static int ad7124_check_chip_id(struct ad7124_state *st)
+ return 0;
+ }
+
+-static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
+- struct device_node *np)
++static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
++ struct device *dev)
+ {
+ struct ad7124_state *st = iio_priv(indio_dev);
+ struct ad7124_channel_config *cfg;
+ struct ad7124_channel *channels;
+- struct device_node *child;
+ struct iio_chan_spec *chan;
+ unsigned int ain[2], channel = 0, tmp;
+ int ret;
+
+- st->num_channels = of_get_available_child_count(np);
+- if (!st->num_channels) {
+- dev_err(indio_dev->dev.parent, "no channel children\n");
+- return -ENODEV;
+- }
++ st->num_channels = device_get_child_node_count(dev);
++ if (!st->num_channels)
++ return dev_err_probe(dev, -ENODEV, "no channel children\n");
+
+ chan = devm_kcalloc(indio_dev->dev.parent, st->num_channels,
+ sizeof(*chan), GFP_KERNEL);
+@@ -843,39 +841,37 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
+ indio_dev->num_channels = st->num_channels;
+ st->channels = channels;
+
+- for_each_available_child_of_node(np, child) {
+- cfg = &st->channels[channel].cfg;
+-
+- ret = of_property_read_u32(child, "reg", &channel);
++ device_for_each_child_node_scoped(dev, child) {
++ ret = fwnode_property_read_u32(child, "reg", &channel);
+ if (ret)
+- goto err;
++ return ret;
+
+- if (channel >= indio_dev->num_channels) {
+- dev_err(indio_dev->dev.parent,
++ if (channel >= indio_dev->num_channels)
++ return dev_err_probe(dev, -EINVAL,
+ "Channel index >= number of channels\n");
+- ret = -EINVAL;
+- goto err;
+- }
+
+- ret = of_property_read_u32_array(child, "diff-channels",
+- ain, 2);
++ ret = fwnode_property_read_u32_array(child, "diff-channels",
++ ain, 2);
+ if (ret)
+- goto err;
++ return ret;
+
+ st->channels[channel].nr = channel;
+ st->channels[channel].ain = AD7124_CHANNEL_AINP(ain[0]) |
+ AD7124_CHANNEL_AINM(ain[1]);
+
+- cfg->bipolar = of_property_read_bool(child, "bipolar");
++ cfg = &st->channels[channel].cfg;
++ cfg->bipolar = fwnode_property_read_bool(child, "bipolar");
+
+- ret = of_property_read_u32(child, "adi,reference-select", &tmp);
++ ret = fwnode_property_read_u32(child, "adi,reference-select", &tmp);
+ if (ret)
+ cfg->refsel = AD7124_INT_REF;
+ else
+ cfg->refsel = tmp;
+
+- cfg->buf_positive = of_property_read_bool(child, "adi,buffered-positive");
+- cfg->buf_negative = of_property_read_bool(child, "adi,buffered-negative");
++ cfg->buf_positive =
++ fwnode_property_read_bool(child, "adi,buffered-positive");
++ cfg->buf_negative =
++ fwnode_property_read_bool(child, "adi,buffered-negative");
+
+ chan[channel] = ad7124_channel_template;
+ chan[channel].address = channel;
+@@ -885,10 +881,6 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev,
+ }
+
+ return 0;
+-err:
+- of_node_put(child);
+-
+- return ret;
+ }
+
+ static int ad7124_setup(struct ad7124_state *st)
+@@ -948,9 +940,7 @@ static int ad7124_probe(struct spi_device *spi)
+ struct iio_dev *indio_dev;
+ int i, ret;
+
+- info = of_device_get_match_data(&spi->dev);
+- if (!info)
+- info = (void *)spi_get_device_id(spi)->driver_data;
++ info = spi_get_device_match_data(spi);
+ if (!info)
+ return -ENODEV;
+
+@@ -970,7 +960,7 @@ static int ad7124_probe(struct spi_device *spi)
+ if (ret < 0)
+ return ret;
+
+- ret = ad7124_of_parse_channel_config(indio_dev, spi->dev.of_node);
++ ret = ad7124_parse_channel_config(indio_dev, &spi->dev);
+ if (ret < 0)
+ return ret;
+
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 45a497c0258b30..2d179bc56ce608 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -444,7 +444,7 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_NDR;
+ break;
+- case MLX5E_PROT_MASK(MLX5E_400GAUI_8):
++ case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8):
+ *active_width = IB_WIDTH_8X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 7a303a9d6bf72b..cff3393f0dd000 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -189,6 +189,7 @@ static const char * const smbus_pnp_ids[] = {
+ "LEN2054", /* E480 */
+ "LEN2055", /* E580 */
+ "LEN2068", /* T14 Gen 1 */
++ "SYN3015", /* HP EliteBook 840 G2 */
+ "SYN3052", /* HP EliteBook 840 G4 */
+ "SYN3221", /* HP 15-ay000 */
+ "SYN323d", /* HP Spectre X360 13-w013dx */
+diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
+index e9eb9554dd7bdc..bad238f69a7afd 100644
+--- a/drivers/input/serio/i8042-acpipnpio.h
++++ b/drivers/input/serio/i8042-acpipnpio.h
+@@ -627,6 +627,15 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
+ },
++ {
++ /* Fujitsu Lifebook E756 */
++ /* https://bugzilla.suse.com/show_bug.cgi?id=1229056 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E756"),
++ },
++ .driver_data = (void *)(SERIO_QUIRK_NOMUX)
++ },
+ {
+ /* Fujitsu Lifebook E5411 */
+ .matches = {
+diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
+index a66375700a630d..8b8c43b3c27f29 100644
+--- a/drivers/input/touchscreen/ads7846.c
++++ b/drivers/input/touchscreen/ads7846.c
+@@ -808,7 +808,7 @@ static void ads7846_read_state(struct ads7846 *ts)
+ m = &ts->msg[msg_idx];
+ error = spi_sync(ts->spi, m);
+ if (error) {
+- dev_err(&ts->spi->dev, "spi_sync --> %d\n", error);
++ dev_err_ratelimited(&ts->spi->dev, "spi_sync --> %d\n", error);
+ packet->ignore = true;
+ return;
+ }
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index 470add73f7bdac..a36dd749c688e1 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -2183,6 +2183,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ unsigned int journal_section, journal_entry;
+ unsigned int journal_read_pos;
++ sector_t recalc_sector;
+ struct completion read_comp;
+ bool discard_retried = false;
+ bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
+@@ -2323,6 +2324,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ goto lock_retry;
+ }
+ }
++ recalc_sector = le64_to_cpu(ic->sb->recalc_sector);
+ spin_unlock_irq(&ic->endio_wait.lock);
+
+ if (unlikely(journal_read_pos != NOT_FOUND)) {
+@@ -2377,7 +2379,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
+ if (need_sync_io) {
+ wait_for_completion_io(&read_comp);
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+- dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
++ dio->range.logical_sector + dio->range.n_sectors > recalc_sector)
+ goto skip_check;
+ if (ic->mode == 'B') {
+ if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
+diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c
+index f1f766b709657b..4eddc5ba1af9c8 100644
+--- a/drivers/misc/eeprom/digsy_mtc_eeprom.c
++++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c
+@@ -42,7 +42,7 @@ static void digsy_mtc_op_finish(void *p)
+ }
+
+ struct eeprom_93xx46_platform_data digsy_mtc_eeprom_data = {
+- .flags = EE_ADDR8,
++ .flags = EE_ADDR8 | EE_SIZE1K,
+ .prepare = digsy_mtc_op_prepare,
+ .finish = digsy_mtc_op_finish,
+ };
+diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
+index 3c5509e75a5486..afb5dae4439ce6 100644
+--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
++++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
+@@ -1474,10 +1474,13 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
+ /* Hardware errata - Admin config could not be overwritten if
+ * config is pending, need reset the TAS module
+ */
+- val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
+- if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
+- ret = -EBUSY;
+- goto err_reset_tc;
++ val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
++ if (val & QSYS_TAG_CONFIG_ENABLE) {
++ val = ocelot_read(ocelot, QSYS_PARAM_STATUS_REG_8);
++ if (val & QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING) {
++ ret = -EBUSY;
++ goto err_reset_tc;
++ }
+ }
+
+ ocelot_rmw_rix(ocelot,
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
+index 63b3e02fab162e..4968f6f0bdbc25 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.h
++++ b/drivers/net/ethernet/faraday/ftgmac100.h
+@@ -84,7 +84,7 @@
+ FTGMAC100_INT_RPKT_BUF)
+
+ /* All the interrupts we care about */
+-#define FTGMAC100_INT_ALL (FTGMAC100_INT_RPKT_BUF | \
++#define FTGMAC100_INT_ALL (FTGMAC100_INT_RXTX | \
+ FTGMAC100_INT_BAD)
+
+ /*
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index c6a3eefd83bff9..e7bf70ac9a4ca5 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -2285,12 +2285,12 @@ static netdev_tx_t
+ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+ {
+ const int queue_mapping = skb_get_queue_mapping(skb);
+- bool nonlinear = skb_is_nonlinear(skb);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa_percpu_priv *percpu_priv;
+ struct netdev_queue *txq;
+ struct dpaa_priv *priv;
+ struct qm_fd fd;
++ bool nonlinear;
+ int offset = 0;
+ int err = 0;
+
+@@ -2300,6 +2300,13 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+
+ qm_fd_clear_fd(&fd);
+
++ /* Packet data is always read as 32-bit words, so zero out any part of
++ * the skb which might be sent if we have to pad the packet
++ */
++ if (__skb_put_padto(skb, ETH_ZLEN, false))
++ goto enomem;
++
++ nonlinear = skb_is_nonlinear(skb);
+ if (!nonlinear) {
+ /* We're going to store the skb backpointer at the beginning
+ * of the data buffer, so we need a privately owned skb
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 92c592c177e67a..9650ce594e2fdd 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -11370,7 +11370,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
+
+ pcim_iounmap(pdev, hdev->hw.hw.io_base);
+ pci_free_irq_vectors(pdev);
+- pci_release_mem_regions(pdev);
++ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ }
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index b3010a53f1b457..3a0ef56d3edcac 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -2600,13 +2600,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+- /* The Rx rule will only exist to remove if the LLDP FW
+- * engine is currently stopped
+- */
+- if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
+- !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+- ice_cfg_sw_lldp(vsi, false, false);
+-
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ if (err)
+@@ -2953,6 +2946,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
+ ice_rss_clean(vsi);
+
+ ice_vsi_close(vsi);
++
++	/* The Rx rule to remove will only exist if the LLDP FW
++ * engine is currently stopped
++ */
++ if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
++ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
++ ice_cfg_sw_lldp(vsi, false, false);
++
+ ice_vsi_decfg(vsi);
+
+ /* retain SW VSI data structure since it is needed to unregister and
+diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
+index 88ee2491312a55..355716e6bcc82d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_switch.c
++++ b/drivers/net/ethernet/intel/ice/ice_switch.c
+@@ -3072,7 +3072,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
+
+ /* A rule already exists with the new VSI being added */
+ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
+- return 0;
++ return -EEXIST;
+
+ /* Update the previously created VSI list set with
+ * the new VSI ID passed in
+@@ -3142,7 +3142,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
+
+ list_head = &sw->recp_list[recp_id].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+- if (list_itr->vsi_list_info) {
++ if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
+ map_info = list_itr->vsi_list_info;
+ if (test_bit(vsi_handle, map_info->vsi_map)) {
+ *vsi_list_id = map_info->vsi_list_id;
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index fa268d7bd1bc3c..986bcbf0a6abaf 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -33,6 +33,7 @@
+ #include <linux/bpf_trace.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/etherdevice.h>
++#include <linux/lockdep.h>
+ #ifdef CONFIG_IGB_DCA
+ #include <linux/dca.h>
+ #endif
+@@ -2939,8 +2940,11 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+ }
+ }
+
++/* This function assumes __netif_tx_lock is held by the caller. */
+ static void igb_xdp_ring_update_tail(struct igb_ring *ring)
+ {
++ lockdep_assert_held(&txring_txq(ring)->_xmit_lock);
++
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ */
+@@ -3025,11 +3029,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
+ nxmit++;
+ }
+
+- __netif_tx_unlock(nq);
+-
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ igb_xdp_ring_update_tail(tx_ring);
+
++ __netif_tx_unlock(nq);
++
+ return nxmit;
+ }
+
+@@ -8889,12 +8893,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
+
+ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+ {
++ unsigned int total_bytes = 0, total_packets = 0;
+ struct igb_adapter *adapter = q_vector->adapter;
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+- struct sk_buff *skb = rx_ring->skb;
+- unsigned int total_bytes = 0, total_packets = 0;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
++ struct sk_buff *skb = rx_ring->skb;
++ int cpu = smp_processor_id();
+ unsigned int xdp_xmit = 0;
++ struct netdev_queue *nq;
+ struct xdp_buff xdp;
+ u32 frame_sz = 0;
+ int rx_buf_pgcnt;
+@@ -9022,7 +9028,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+ if (xdp_xmit & IGB_XDP_TX) {
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
++ nq = txring_txq(tx_ring);
++ __netif_tx_lock(nq, cpu);
+ igb_xdp_ring_update_tail(tx_ring);
++ __netif_tx_unlock(nq);
+ }
+
+ u64_stats_update_begin(&rx_ring->rx_syncp);
+diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
+index 1732ec3c3dbdc4..a718207988f2c4 100644
+--- a/drivers/net/ethernet/jme.c
++++ b/drivers/net/ethernet/jme.c
+@@ -946,15 +946,13 @@ jme_udpsum(struct sk_buff *skb)
+ if (skb->protocol != htons(ETH_P_IP))
+ return csum;
+ skb_set_network_header(skb, ETH_HLEN);
+- if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
+- (skb->len < (ETH_HLEN +
+- (ip_hdr(skb)->ihl << 2) +
+- sizeof(struct udphdr)))) {
++
++ if (ip_hdr(skb)->protocol != IPPROTO_UDP ||
++ skb->len < (ETH_HLEN + ip_hdrlen(skb) + sizeof(struct udphdr))) {
+ skb_reset_network_header(skb);
+ return csum;
+ }
+- skb_set_transport_header(skb,
+- ETH_HLEN + (ip_hdr(skb)->ihl << 2));
++ skb_set_transport_header(skb, ETH_HLEN + ip_hdrlen(skb));
+ csum = udp_hdr(skb)->check;
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+index 185c296eaaf0d4..e81cfcaf9ce4fe 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+@@ -290,6 +290,7 @@ struct nix_mark_format {
+
+ /* smq(flush) to tl1 cir/pir info */
+ struct nix_smq_tree_ctx {
++ u16 schq;
+ u64 cir_off;
+ u64 cir_val;
+ u64 pir_off;
+@@ -299,8 +300,6 @@ struct nix_smq_tree_ctx {
+ /* smq flush context */
+ struct nix_smq_flush_ctx {
+ int smq;
+- u16 tl1_schq;
+- u16 tl2_schq;
+ struct nix_smq_tree_ctx smq_tree_ctx[NIX_TXSCH_LVL_CNT];
+ };
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index a07e5c8786c4b4..224a025283ca7d 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -2146,14 +2146,13 @@ static void nix_smq_flush_fill_ctx(struct rvu *rvu, int blkaddr, int smq,
+ schq = smq;
+ for (lvl = NIX_TXSCH_LVL_SMQ; lvl <= NIX_TXSCH_LVL_TL1; lvl++) {
+ smq_tree_ctx = &smq_flush_ctx->smq_tree_ctx[lvl];
++ smq_tree_ctx->schq = schq;
+ if (lvl == NIX_TXSCH_LVL_TL1) {
+- smq_flush_ctx->tl1_schq = schq;
+ smq_tree_ctx->cir_off = NIX_AF_TL1X_CIR(schq);
+ smq_tree_ctx->pir_off = 0;
+ smq_tree_ctx->pir_val = 0;
+ parent_off = 0;
+ } else if (lvl == NIX_TXSCH_LVL_TL2) {
+- smq_flush_ctx->tl2_schq = schq;
+ smq_tree_ctx->cir_off = NIX_AF_TL2X_CIR(schq);
+ smq_tree_ctx->pir_off = NIX_AF_TL2X_PIR(schq);
+ parent_off = NIX_AF_TL2X_PARENT(schq);
+@@ -2188,8 +2187,8 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
+ {
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
++ int tl2, tl2_schq;
+ u64 regoff;
+- int tl2;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+@@ -2197,16 +2196,17 @@ static void nix_smq_flush_enadis_xoff(struct rvu *rvu, int blkaddr,
+
+ /* loop through all TL2s with matching PF_FUNC */
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2];
++ tl2_schq = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL2].schq;
+ for (tl2 = 0; tl2 < txsch->schq.max; tl2++) {
+ /* skip the smq(flush) TL2 */
+- if (tl2 == smq_flush_ctx->tl2_schq)
++ if (tl2 == tl2_schq)
+ continue;
+ /* skip unused TL2s */
+ if (TXSCH_MAP_FLAGS(txsch->pfvf_map[tl2]) & NIX_TXSCHQ_FREE)
+ continue;
+ /* skip if PF_FUNC doesn't match */
+ if ((TXSCH_MAP_FUNC(txsch->pfvf_map[tl2]) & ~RVU_PFVF_FUNC_MASK) !=
+- (TXSCH_MAP_FUNC(txsch->pfvf_map[smq_flush_ctx->tl2_schq] &
++ (TXSCH_MAP_FUNC(txsch->pfvf_map[tl2_schq] &
+ ~RVU_PFVF_FUNC_MASK)))
+ continue;
+ /* enable/disable XOFF */
+@@ -2248,10 +2248,12 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ int smq, u16 pcifunc, int nixlf)
+ {
+ struct nix_smq_flush_ctx *smq_flush_ctx;
++ int err, restore_tx_en = 0, i;
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id = 0, lmac_id = 0;
+- int err, restore_tx_en = 0;
+- u64 cfg;
++ u16 tl2_tl3_link_schq;
++ u8 link, link_level;
++ u64 cfg, bmap = 0;
+
+ if (!is_rvu_otx2(rvu)) {
+ /* Skip SMQ flush if pkt count is zero */
+@@ -2275,16 +2277,38 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, true);
+ nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, false);
+
+- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
+- /* Do SMQ flush and set enqueue xoff */
+- cfg |= BIT_ULL(50) | BIT_ULL(49);
+- rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
+-
+ /* Disable backpressure from physical link,
+ * otherwise SMQ flush may stall.
+ */
+ rvu_cgx_enadis_rx_bp(rvu, pf, false);
+
++ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ?
++ NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
++ tl2_tl3_link_schq = smq_flush_ctx->smq_tree_ctx[link_level].schq;
++ link = smq_flush_ctx->smq_tree_ctx[NIX_TXSCH_LVL_TL1].schq;
++
++ /* SMQ set enqueue xoff */
++ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
++ cfg |= BIT_ULL(50);
++ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
++
++ /* Clear all NIX_AF_TL3_TL2_LINK_CFG[ENA] for the TL3/TL2 queue */
++ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
++ cfg = rvu_read64(rvu, blkaddr,
++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
++ if (!(cfg & BIT_ULL(12)))
++ continue;
++ bmap |= (1 << i);
++ cfg &= ~BIT_ULL(12);
++ rvu_write64(rvu, blkaddr,
++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
++ }
++
++ /* Do SMQ flush and set enqueue xoff */
++ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
++ cfg |= BIT_ULL(50) | BIT_ULL(49);
++ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq), cfg);
++
+ /* Wait for flush to complete */
+ err = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(smq), BIT_ULL(49), true);
+@@ -2293,6 +2317,17 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ "NIXLF%d: SMQ%d flush failed, txlink might be busy\n",
+ nixlf, smq);
+
++ /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
++ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
++ if (!(bmap & (1 << i)))
++ continue;
++ cfg = rvu_read64(rvu, blkaddr,
++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
++ cfg |= BIT_ULL(12);
++ rvu_write64(rvu, blkaddr,
++ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
++ }
++
+ /* clear XOFF on TL2s */
+ nix_smq_flush_enadis_rate(rvu, blkaddr, smq_flush_ctx, true);
+ nix_smq_flush_enadis_xoff(rvu, blkaddr, smq_flush_ctx, false);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+index 50db127e6371bb..54379297a7489e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -136,6 +136,10 @@ void mlx5e_build_ptys2ethtool_map(void)
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, legacy,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
++ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100BASE_TX, legacy,
++ ETHTOOL_LINK_MODE_100baseT_Full_BIT);
++ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_T, legacy,
++ ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, legacy,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, legacy,
+@@ -201,6 +205,12 @@ void mlx5e_build_ptys2ethtool_map(void)
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT);
++ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_8_400GBASE_CR8, ext,
++ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
++ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
++ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
++ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
++ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_1_100GBASE_CR_KR, ext,
+ ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+index 255bc8b749f9a5..8587cd572da536 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+@@ -319,7 +319,7 @@ int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
+ return -EPERM;
+
+ mutex_lock(&esw->state_lock);
+- if (esw->mode != MLX5_ESWITCH_LEGACY) {
++ if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+@@ -339,7 +339,7 @@ int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
+ if (!mlx5_esw_allowed(esw))
+ return -EPERM;
+
+- if (esw->mode != MLX5_ESWITCH_LEGACY)
++ if (esw->mode != MLX5_ESWITCH_LEGACY || !mlx5_esw_is_fdb_created(esw))
+ return -EOPNOTSUPP;
+
+ *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+index 1887a24ee414d0..cc0f2be21a265a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+@@ -311,6 +311,25 @@ static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw,
+ return err;
+ }
+
++static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
++{
++ switch (type) {
++ case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
++ return MLX5_CAP_QOS(dev, esw_element_type) &
++ ELEMENT_TYPE_CAP_MASK_TSAR;
++ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
++ return MLX5_CAP_QOS(dev, esw_element_type) &
++ ELEMENT_TYPE_CAP_MASK_VPORT;
++ case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
++ return MLX5_CAP_QOS(dev, esw_element_type) &
++ ELEMENT_TYPE_CAP_MASK_VPORT_TC;
++ case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
++ return MLX5_CAP_QOS(dev, esw_element_type) &
++ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
++ }
++ return false;
++}
++
+ static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ u32 max_rate, u32 bw_share)
+@@ -322,6 +341,9 @@ static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw,
+ void *vport_elem;
+ int err;
+
++ if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT))
++ return -EOPNOTSUPP;
++
+ parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+@@ -420,6 +442,7 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
+ {
+ u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+ struct mlx5_esw_rate_group *group;
++ __be32 *attr;
+ u32 divider;
+ int err;
+
+@@ -427,6 +450,12 @@ __esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *ex
+ if (!group)
+ return ERR_PTR(-ENOMEM);
+
++ MLX5_SET(scheduling_context, tsar_ctx, element_type,
++ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
++
++ attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
++ *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
++
+ MLX5_SET(scheduling_context, tsar_ctx, parent_element_id,
+ esw->qos.root_tsar_ix);
+ err = mlx5_create_scheduling_element_cmd(esw->dev,
+@@ -525,25 +554,6 @@ static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw,
+ return err;
+ }
+
+-static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
+-{
+- switch (type) {
+- case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
+- return MLX5_CAP_QOS(dev, esw_element_type) &
+- ELEMENT_TYPE_CAP_MASK_TASR;
+- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
+- return MLX5_CAP_QOS(dev, esw_element_type) &
+- ELEMENT_TYPE_CAP_MASK_VPORT;
+- case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
+- return MLX5_CAP_QOS(dev, esw_element_type) &
+- ELEMENT_TYPE_CAP_MASK_VPORT_TC;
+- case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
+- return MLX5_CAP_QOS(dev, esw_element_type) &
+- ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
+- }
+- return false;
+-}
+-
+ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack)
+ {
+ u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+@@ -554,7 +564,8 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
+ if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+ return -EOPNOTSUPP;
+
+- if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
++ if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR) ||
++ !(MLX5_CAP_QOS(dev, esw_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(scheduling_context, tsar_ctx, element_type,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 11f11248feb8b7..96136229b1b070 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -2205,6 +2205,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
+ { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */
+ { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
+ { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
++ { PCI_VDEVICE(MELLANOX, 0x1025) }, /* ConnectX-9 */
+ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
+ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
+ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+index be70d1f23a5da3..749f0fc2c189ad 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+@@ -1098,7 +1098,7 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
+ [MLX5E_CAUI_4_100GBASE_CR4_KR4] = 100000,
+ [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000,
+ [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000,
+- [MLX5E_400GAUI_8] = 400000,
++ [MLX5E_400GAUI_8_400GBASE_CR8] = 400000,
+ [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000,
+ [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000,
+ [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000,
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+index 8bce730b5c5bef..db2bd3ad63ba36 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
+@@ -28,6 +28,9 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ {
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+
++ if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP))
++ return -EOPNOTSUPP;
++
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP);
+@@ -44,6 +47,10 @@ int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ void *attr;
+
++ if (!(MLX5_CAP_QOS(mdev, nic_element_type) & ELEMENT_TYPE_CAP_MASK_TSAR) ||
++ !(MLX5_CAP_QOS(mdev, nic_tsar_type) & TSAR_TYPE_CAP_MASK_DWRR))
++ return -EOPNOTSUPP;
++
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+index f09f10f17d7eaa..2facbdfbb319e7 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
+@@ -419,6 +419,8 @@ struct axidma_bd {
+ * @tx_bytes: TX byte count for statistics
+ * @tx_stat_sync: Synchronization object for TX stats
+ * @dma_err_task: Work structure to process Axi DMA errors
++ * @stopping: Set when @dma_err_task shouldn't do anything because we are
++ * about to stop the device.
+ * @tx_irq: Axidma TX IRQ number
+ * @rx_irq: Axidma RX IRQ number
+ * @eth_irq: Ethernet core IRQ number
+@@ -481,6 +483,7 @@ struct axienet_local {
+ struct u64_stats_sync tx_stat_sync;
+
+ struct work_struct dma_err_task;
++ bool stopping;
+
+ int tx_irq;
+ int rx_irq;
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 144feb7a2fdac6..65d7aaad43fe90 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1162,6 +1162,7 @@ static int axienet_open(struct net_device *ndev)
+ phylink_start(lp->phylink);
+
+ /* Enable worker thread for Axi DMA error handling */
++ lp->stopping = false;
+ INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
+
+ napi_enable(&lp->napi_rx);
+@@ -1217,6 +1218,9 @@ static int axienet_stop(struct net_device *ndev)
+
+ dev_dbg(&ndev->dev, "axienet_close()\n");
+
++ WRITE_ONCE(lp->stopping, true);
++ flush_work(&lp->dma_err_task);
++
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
+
+@@ -1761,6 +1765,10 @@ static void axienet_dma_err_handler(struct work_struct *work)
+ dma_err_task);
+ struct net_device *ndev = lp->ndev;
+
++ /* Don't bother if we are going to stop anyway */
++ if (READ_ONCE(lp->stopping))
++ return;
++
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
+
+diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
+index 897b979ec03c81..3b5fcaf0dd36db 100644
+--- a/drivers/net/phy/vitesse.c
++++ b/drivers/net/phy/vitesse.c
+@@ -237,16 +237,6 @@ static int vsc739x_config_init(struct phy_device *phydev)
+ return 0;
+ }
+
+-static int vsc73xx_config_aneg(struct phy_device *phydev)
+-{
+- /* The VSC73xx switches does not like to be instructed to
+- * do autonegotiation in any way, it prefers that you just go
+- * with the power-on/reset defaults. Writing some registers will
+- * just make autonegotiation permanently fail.
+- */
+- return 0;
+-}
+-
+ /* This adds a skew for both TX and RX clocks, so the skew should only be
+ * applied to "rgmii-id" interfaces. It may not work as expected
+ * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces.
+@@ -444,7 +434,6 @@ static struct phy_driver vsc82xx_driver[] = {
+ .phy_id_mask = 0x000ffff0,
+ /* PHY_GBIT_FEATURES */
+ .config_init = vsc738x_config_init,
+- .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+ }, {
+@@ -453,7 +442,6 @@ static struct phy_driver vsc82xx_driver[] = {
+ .phy_id_mask = 0x000ffff0,
+ /* PHY_GBIT_FEATURES */
+ .config_init = vsc738x_config_init,
+- .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+ }, {
+@@ -462,7 +450,6 @@ static struct phy_driver vsc82xx_driver[] = {
+ .phy_id_mask = 0x000ffff0,
+ /* PHY_GBIT_FEATURES */
+ .config_init = vsc739x_config_init,
+- .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+ }, {
+@@ -471,7 +458,6 @@ static struct phy_driver vsc82xx_driver[] = {
+ .phy_id_mask = 0x000ffff0,
+ /* PHY_GBIT_FEATURES */
+ .config_init = vsc739x_config_init,
+- .config_aneg = vsc73xx_config_aneg,
+ .read_page = vsc73xx_read_page,
+ .write_page = vsc73xx_write_page,
+ }, {
+diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
+index 6eeef10edadad1..46afb95ffabe3b 100644
+--- a/drivers/net/usb/ipheth.c
++++ b/drivers/net/usb/ipheth.c
+@@ -286,10 +286,11 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
+ return;
+ }
+
+- if (urb->actual_length <= IPHETH_IP_ALIGN) {
+- dev->net->stats.rx_length_errors++;
+- return;
+- }
++ /* iPhone may periodically send URBs with no payload
++ * on the "bulk in" endpoint. It is safe to ignore them.
++ */
++ if (urb->actual_length == 0)
++ goto rx_submit;
+
+ /* RX URBs starting with 0x00 0x01 do not encapsulate Ethernet frames,
+ * but rather are control frames. Their purpose is not documented, and
+@@ -298,7 +299,8 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
+ * URB received from the bulk IN endpoint.
+ */
+ if (unlikely
+- (((char *)urb->transfer_buffer)[0] == 0 &&
++ (urb->actual_length == 4 &&
++ ((char *)urb->transfer_buffer)[0] == 0 &&
+ ((char *)urb->transfer_buffer)[1] == 1))
+ goto rx_submit;
+
+@@ -306,7 +308,6 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
+ if (retval != 0) {
+ dev_err(&dev->intf->dev, "%s: callback retval: %d\n",
+ __func__, retval);
+- return;
+ }
+
+ rx_submit:
+@@ -354,13 +355,14 @@ static int ipheth_carrier_set(struct ipheth_device *dev)
+ 0x02, /* index */
+ dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE,
+ IPHETH_CTRL_TIMEOUT);
+- if (retval < 0) {
++ if (retval <= 0) {
+ dev_err(&dev->intf->dev, "%s: usb_control_msg: %d\n",
+ __func__, retval);
+ return retval;
+ }
+
+- if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) {
++ if ((retval == 1 && dev->ctrl_buf[0] == IPHETH_CARRIER_ON) ||
++ (retval >= 2 && dev->ctrl_buf[1] == IPHETH_CARRIER_ON)) {
+ netif_carrier_on(dev->net);
+ if (dev->tx_urb->status != -EINPROGRESS)
+ netif_wake_queue(dev->net);
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+index 6a5c2cae087d03..6dec54431312ad 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+@@ -1095,7 +1095,7 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct inet6_dev *idev)
+ {
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+- struct mt792x_dev *dev = mvif->phy->dev;
++ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct inet6_ifaddr *ifa;
+ struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
+ struct sk_buff *skb;
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index e7fd1315d7edc2..f28c005c2bb265 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -2131,6 +2131,19 @@ const char *nvmem_dev_name(struct nvmem_device *nvmem)
+ }
+ EXPORT_SYMBOL_GPL(nvmem_dev_name);
+
++/**
++ * nvmem_dev_size() - Get the size of a given nvmem device.
++ *
++ * @nvmem: nvmem device.
++ *
++ * Return: size of the nvmem device.
++ */
++size_t nvmem_dev_size(struct nvmem_device *nvmem)
++{
++ return nvmem->size;
++}
++EXPORT_SYMBOL_GPL(nvmem_dev_size);
++
+ static int __init nvmem_init(void)
+ {
+ return bus_register(&nvmem_bus_type);
+diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c
+index c4ae94af4af78e..adabbfdad6fb6d 100644
+--- a/drivers/nvmem/u-boot-env.c
++++ b/drivers/nvmem/u-boot-env.c
+@@ -23,13 +23,10 @@ enum u_boot_env_format {
+
+ struct u_boot_env {
+ struct device *dev;
++ struct nvmem_device *nvmem;
+ enum u_boot_env_format format;
+
+ struct mtd_info *mtd;
+-
+- /* Cells */
+- struct nvmem_cell_info *cells;
+- int ncells;
+ };
+
+ struct u_boot_env_image_single {
+@@ -94,70 +91,71 @@ static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, i
+ static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf,
+ size_t data_offset, size_t data_len)
+ {
++ struct nvmem_device *nvmem = priv->nvmem;
+ struct device *dev = priv->dev;
+ char *data = buf + data_offset;
+ char *var, *value, *eq;
+- int idx;
+-
+- priv->ncells = 0;
+- for (var = data; var < data + data_len && *var; var += strlen(var) + 1)
+- priv->ncells++;
+
+- priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL);
+- if (!priv->cells)
+- return -ENOMEM;
+-
+- for (var = data, idx = 0;
++ for (var = data;
+ var < data + data_len && *var;
+- var = value + strlen(value) + 1, idx++) {
++ var = value + strlen(value) + 1) {
++ struct nvmem_cell_info info = {};
++
+ eq = strchr(var, '=');
+ if (!eq)
+ break;
+ *eq = '\0';
+ value = eq + 1;
+
+- priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL);
+- if (!priv->cells[idx].name)
++ info.name = devm_kstrdup(dev, var, GFP_KERNEL);
++ if (!info.name)
+ return -ENOMEM;
+- priv->cells[idx].offset = data_offset + value - data;
+- priv->cells[idx].bytes = strlen(value);
+- priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name);
++ info.offset = data_offset + value - data;
++ info.bytes = strlen(value);
++ info.np = of_get_child_by_name(dev->of_node, info.name);
+ if (!strcmp(var, "ethaddr")) {
+- priv->cells[idx].raw_len = strlen(value);
+- priv->cells[idx].bytes = ETH_ALEN;
+- priv->cells[idx].read_post_process = u_boot_env_read_post_process_ethaddr;
++ info.raw_len = strlen(value);
++ info.bytes = ETH_ALEN;
++ info.read_post_process = u_boot_env_read_post_process_ethaddr;
+ }
+- }
+
+- if (WARN_ON(idx != priv->ncells))
+- priv->ncells = idx;
++ nvmem_add_one_cell(nvmem, &info);
++ }
+
+ return 0;
+ }
+
+ static int u_boot_env_parse(struct u_boot_env *priv)
+ {
++ struct nvmem_device *nvmem = priv->nvmem;
+ struct device *dev = priv->dev;
+ size_t crc32_data_offset;
+ size_t crc32_data_len;
+ size_t crc32_offset;
++ __le32 *crc32_addr;
+ size_t data_offset;
+ size_t data_len;
++ size_t dev_size;
+ uint32_t crc32;
+ uint32_t calc;
+- size_t bytes;
+ uint8_t *buf;
++ int bytes;
+ int err;
+
+- buf = kcalloc(1, priv->mtd->size, GFP_KERNEL);
++ dev_size = nvmem_dev_size(nvmem);
++
++ buf = kzalloc(dev_size, GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+- err = mtd_read(priv->mtd, 0, priv->mtd->size, &bytes, buf);
+- if ((err && !mtd_is_bitflip(err)) || bytes != priv->mtd->size) {
+- dev_err(dev, "Failed to read from mtd: %d\n", err);
++ bytes = nvmem_device_read(nvmem, 0, dev_size, buf);
++ if (bytes < 0) {
++ err = bytes;
++ goto err_kfree;
++ } else if (bytes != dev_size) {
++ err = -EIO;
+ goto err_kfree;
+ }
+
+@@ -178,9 +176,17 @@ static int u_boot_env_parse(struct u_boot_env *priv)
+ data_offset = offsetof(struct u_boot_env_image_broadcom, data);
+ break;
+ }
+- crc32 = le32_to_cpu(*(__le32 *)(buf + crc32_offset));
+- crc32_data_len = priv->mtd->size - crc32_data_offset;
+- data_len = priv->mtd->size - data_offset;
++
++ if (dev_size < data_offset) {
++ dev_err(dev, "Device too small for u-boot-env\n");
++ err = -EIO;
++ goto err_kfree;
++ }
++
++ crc32_addr = (__le32 *)(buf + crc32_offset);
++ crc32 = le32_to_cpu(*crc32_addr);
++ crc32_data_len = dev_size - crc32_data_offset;
++ data_len = dev_size - data_offset;
+
+ calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
+ if (calc != crc32) {
+@@ -189,10 +195,8 @@ static int u_boot_env_parse(struct u_boot_env *priv)
+ goto err_kfree;
+ }
+
+- buf[priv->mtd->size - 1] = '\0';
++ buf[dev_size - 1] = '\0';
+ err = u_boot_env_add_cells(priv, buf, data_offset, data_len);
+- if (err)
+- dev_err(dev, "Failed to add cells: %d\n", err);
+
+ err_kfree:
+ kfree(buf);
+@@ -209,7 +213,6 @@ static int u_boot_env_probe(struct platform_device *pdev)
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct u_boot_env *priv;
+- int err;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+@@ -224,17 +227,15 @@ static int u_boot_env_probe(struct platform_device *pdev)
+ return PTR_ERR(priv->mtd);
+ }
+
+- err = u_boot_env_parse(priv);
+- if (err)
+- return err;
+-
+ config.dev = dev;
+- config.cells = priv->cells;
+- config.ncells = priv->ncells;
+ config.priv = priv;
+ config.size = priv->mtd->size;
+
+- return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config));
++ priv->nvmem = devm_nvmem_register(dev, &config);
++ if (IS_ERR(priv->nvmem))
++ return PTR_ERR(priv->nvmem);
++
++ return u_boot_env_parse(priv);
+ }
+
+ static const struct of_device_id u_boot_env_of_match_table[] = {
+diff --git a/drivers/pinctrl/intel/pinctrl-meteorlake.c b/drivers/pinctrl/intel/pinctrl-meteorlake.c
+index 7ced2b402dce04..812696dfe30263 100644
+--- a/drivers/pinctrl/intel/pinctrl-meteorlake.c
++++ b/drivers/pinctrl/intel/pinctrl-meteorlake.c
+@@ -583,6 +583,7 @@ static const struct intel_pinctrl_soc_data mtls_soc_data = {
+ };
+
+ static const struct acpi_device_id mtl_pinctrl_acpi_match[] = {
++ { "INTC105E", (kernel_ulong_t)&mtlp_soc_data },
+ { "INTC1083", (kernel_ulong_t)&mtlp_soc_data },
+ { "INTC1082", (kernel_ulong_t)&mtls_soc_data },
+ { }
+diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
+index 0fe5be53965252..8c5b5f35d8485b 100644
+--- a/drivers/platform/surface/surface_aggregator_registry.c
++++ b/drivers/platform/surface/surface_aggregator_registry.c
+@@ -298,7 +298,7 @@ static const struct software_node *ssam_node_group_sp8[] = {
+ NULL,
+ };
+
+-/* Devices for Surface Pro 9 */
++/* Devices for Surface Pro 9 and 10 */
+ static const struct software_node *ssam_node_group_sp9[] = {
+ &ssam_node_root,
+ &ssam_node_hub_kip,
+@@ -337,6 +337,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
+ /* Surface Pro 9 */
+ { "MSHW0343", (unsigned long)ssam_node_group_sp9 },
+
++ /* Surface Pro 10 */
++ { "MSHW0510", (unsigned long)ssam_node_group_sp9 },
++
+ /* Surface Book 2 */
+ { "MSHW0107", (unsigned long)ssam_node_group_gen5 },
+
+@@ -367,6 +370,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
+ /* Surface Laptop Go 2 */
+ { "MSHW0290", (unsigned long)ssam_node_group_slg1 },
+
++ /* Surface Laptop Go 3 */
++ { "MSHW0440", (unsigned long)ssam_node_group_slg1 },
++
+ /* Surface Laptop Studio */
+ { "MSHW0123", (unsigned long)ssam_node_group_sls },
+
+diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
+index cf845ee1c7b1f0..ebd81846e2d564 100644
+--- a/drivers/platform/x86/panasonic-laptop.c
++++ b/drivers/platform/x86/panasonic-laptop.c
+@@ -337,7 +337,8 @@ static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc)
+ }
+
+ if (pcc->num_sifr < hkey->package.count) {
+- pr_err("SQTY reports bad SINF length\n");
++ pr_err("SQTY reports bad SINF length SQTY: %lu SINF-pkg-count: %u\n",
++ pcc->num_sifr, hkey->package.count);
+ status = AE_ERROR;
+ goto end;
+ }
+@@ -773,6 +774,24 @@ static DEVICE_ATTR_RW(dc_brightness);
+ static DEVICE_ATTR_RW(current_brightness);
+ static DEVICE_ATTR_RW(cdpower);
+
++static umode_t pcc_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
++{
++ struct device *dev = kobj_to_dev(kobj);
++ struct acpi_device *acpi = to_acpi_device(dev);
++ struct pcc_acpi *pcc = acpi_driver_data(acpi);
++
++ if (attr == &dev_attr_mute.attr)
++ return (pcc->num_sifr > SINF_MUTE) ? attr->mode : 0;
++
++ if (attr == &dev_attr_eco_mode.attr)
++ return (pcc->num_sifr > SINF_ECO_MODE) ? attr->mode : 0;
++
++ if (attr == &dev_attr_current_brightness.attr)
++ return (pcc->num_sifr > SINF_CUR_BRIGHT) ? attr->mode : 0;
++
++ return attr->mode;
++}
++
+ static struct attribute *pcc_sysfs_entries[] = {
+ &dev_attr_numbatt.attr,
+ &dev_attr_lcdtype.attr,
+@@ -787,8 +806,9 @@ static struct attribute *pcc_sysfs_entries[] = {
+ };
+
+ static const struct attribute_group pcc_attr_group = {
+- .name = NULL, /* put in device directory */
+- .attrs = pcc_sysfs_entries,
++ .name = NULL, /* put in device directory */
++ .attrs = pcc_sysfs_entries,
++ .is_visible = pcc_sysfs_is_visible,
+ };
+
+
+@@ -941,12 +961,15 @@ static int acpi_pcc_hotkey_resume(struct device *dev)
+ if (!pcc)
+ return -EINVAL;
+
+- acpi_pcc_write_sset(pcc, SINF_MUTE, pcc->mute);
+- acpi_pcc_write_sset(pcc, SINF_ECO_MODE, pcc->eco_mode);
++ if (pcc->num_sifr > SINF_MUTE)
++ acpi_pcc_write_sset(pcc, SINF_MUTE, pcc->mute);
++ if (pcc->num_sifr > SINF_ECO_MODE)
++ acpi_pcc_write_sset(pcc, SINF_ECO_MODE, pcc->eco_mode);
+ acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_key);
+ acpi_pcc_write_sset(pcc, SINF_AC_CUR_BRIGHT, pcc->ac_brightness);
+ acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, pcc->dc_brightness);
+- acpi_pcc_write_sset(pcc, SINF_CUR_BRIGHT, pcc->current_brightness);
++ if (pcc->num_sifr > SINF_CUR_BRIGHT)
++ acpi_pcc_write_sset(pcc, SINF_CUR_BRIGHT, pcc->current_brightness);
+
+ return 0;
+ }
+@@ -963,11 +986,21 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
+
+ num_sifr = acpi_pcc_get_sqty(device);
+
+- if (num_sifr < 0 || num_sifr > 255) {
+- pr_err("num_sifr out of range");
++ /*
++ * pcc->sinf is expected to at least have the AC+DC brightness entries.
++ * Accesses to higher SINF entries are checked against num_sifr.
++ */
++ if (num_sifr <= SINF_DC_CUR_BRIGHT || num_sifr > 255) {
++ pr_err("num_sifr %d out of range %d - 255\n", num_sifr, SINF_DC_CUR_BRIGHT + 1);
+ return -ENODEV;
+ }
+
++ /*
++ * Some DSDT-s have an off-by-one bug where the SINF package count is
++ * one higher than the SQTY reported value, allocate 1 entry extra.
++ */
++ num_sifr++;
++
+ pcc = kzalloc(sizeof(struct pcc_acpi), GFP_KERNEL);
+ if (!pcc) {
+ pr_err("Couldn't allocate mem for pcc");
+@@ -1020,11 +1053,14 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
+ acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, 0);
+ pcc->sticky_key = 0;
+
+- pcc->eco_mode = pcc->sinf[SINF_ECO_MODE];
+- pcc->mute = pcc->sinf[SINF_MUTE];
+ pcc->ac_brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
+ pcc->dc_brightness = pcc->sinf[SINF_DC_CUR_BRIGHT];
+- pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
++ if (pcc->num_sifr > SINF_MUTE)
++ pcc->mute = pcc->sinf[SINF_MUTE];
++ if (pcc->num_sifr > SINF_ECO_MODE)
++ pcc->eco_mode = pcc->sinf[SINF_ECO_MODE];
++ if (pcc->num_sifr > SINF_CUR_BRIGHT)
++ pcc->current_brightness = pcc->sinf[SINF_CUR_BRIGHT];
+
+ /* add sysfs attributes */
+ result = sysfs_create_group(&device->dev.kobj, &pcc_attr_group);
+diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
+index cf69d5c415fbfb..68d54887992d91 100644
+--- a/drivers/soundwire/stream.c
++++ b/drivers/soundwire/stream.c
+@@ -1286,18 +1286,18 @@ struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
+ unsigned int port_num)
+ {
+ struct sdw_dpn_prop *dpn_prop;
+- unsigned long mask;
++ u8 num_ports;
+ int i;
+
+ if (direction == SDW_DATA_DIR_TX) {
+- mask = slave->prop.source_ports;
++ num_ports = hweight32(slave->prop.source_ports);
+ dpn_prop = slave->prop.src_dpn_prop;
+ } else {
+- mask = slave->prop.sink_ports;
++ num_ports = hweight32(slave->prop.sink_ports);
+ dpn_prop = slave->prop.sink_dpn_prop;
+ }
+
+- for_each_set_bit(i, &mask, 32) {
++ for (i = 0; i < num_ports; i++) {
+ if (dpn_prop[i].num == port_num)
+ return &dpn_prop[i];
+ }
+diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
+index f4f376a8351b4a..7401ed3b9acd40 100644
+--- a/drivers/spi/spi-geni-qcom.c
++++ b/drivers/spi/spi-geni-qcom.c
+@@ -1110,25 +1110,27 @@ static int spi_geni_probe(struct platform_device *pdev)
+ spin_lock_init(&mas->lock);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
+- pm_runtime_enable(dev);
++ ret = devm_pm_runtime_enable(dev);
++ if (ret)
++ return ret;
+
+ if (device_property_read_bool(&pdev->dev, "spi-slave"))
+ spi->slave = true;
+
+ ret = geni_icc_get(&mas->se, NULL);
+ if (ret)
+- goto spi_geni_probe_runtime_disable;
++ return ret;
+ /* Set the bus quota to a reasonable value for register access */
+ mas->se.icc_paths[GENI_TO_CORE].avg_bw = Bps_to_icc(CORE_2X_50_MHZ);
+ mas->se.icc_paths[CPU_TO_GENI].avg_bw = GENI_DEFAULT_BW;
+
+ ret = geni_icc_set_bw(&mas->se);
+ if (ret)
+- goto spi_geni_probe_runtime_disable;
++ return ret;
+
+ ret = spi_geni_init(mas);
+ if (ret)
+- goto spi_geni_probe_runtime_disable;
++ return ret;
+
+ /*
+ * check the mode supported and set_cs for fifo mode only
+@@ -1157,8 +1159,6 @@ static int spi_geni_probe(struct platform_device *pdev)
+ free_irq(mas->irq, spi);
+ spi_geni_release_dma:
+ spi_geni_release_dma_chan(mas);
+-spi_geni_probe_runtime_disable:
+- pm_runtime_disable(dev);
+ return ret;
+ }
+
+@@ -1170,10 +1170,9 @@ static void spi_geni_remove(struct platform_device *pdev)
+ /* Unregister _before_ disabling pm_runtime() so we stop transfers */
+ spi_unregister_master(spi);
+
+- spi_geni_release_dma_chan(mas);
+-
+ free_irq(mas->irq, spi);
+- pm_runtime_disable(&pdev->dev);
++
++ spi_geni_release_dma_chan(mas);
+ }
+
+ static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
+diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
+index 168eff721ed378..93a9667f6bdcf1 100644
+--- a/drivers/spi/spi-nxp-fspi.c
++++ b/drivers/spi/spi-nxp-fspi.c
+@@ -805,14 +805,15 @@ static void nxp_fspi_fill_txfifo(struct nxp_fspi *f,
+ if (i < op->data.nbytes) {
+ u32 data = 0;
+ int j;
++ int remaining = op->data.nbytes - i;
+ /* Wait for TXFIFO empty */
+ ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
+ FSPI_INTR_IPTXWE, 0,
+ POLL_TOUT, true);
+ WARN_ON(ret);
+
+- for (j = 0; j < ALIGN(op->data.nbytes - i, 4); j += 4) {
+- memcpy(&data, buf + i + j, 4);
++ for (j = 0; j < ALIGN(remaining, 4); j += 4) {
++ memcpy(&data, buf + i + j, min_t(int, 4, remaining - j));
+ fspi_writel(f, data, base + FSPI_TFDR + j);
+ }
+ fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
+diff --git a/drivers/staging/media/atomisp/pci/sh_css_frac.h b/drivers/staging/media/atomisp/pci/sh_css_frac.h
+index 8f08df5c88cc36..569a2f59e5519f 100644
+--- a/drivers/staging/media/atomisp/pci/sh_css_frac.h
++++ b/drivers/staging/media/atomisp/pci/sh_css_frac.h
+@@ -30,12 +30,24 @@
+ #define uISP_VAL_MAX ((unsigned int)((1 << uISP_REG_BIT) - 1))
+
+ /* a:fraction bits for 16bit precision, b:fraction bits for ISP precision */
+-#define sDIGIT_FITTING(v, a, b) \
+- min_t(int, max_t(int, (((v) >> sSHIFT) >> max(sFRACTION_BITS_FITTING(a) - (b), 0)), \
+- sISP_VAL_MIN), sISP_VAL_MAX)
+-#define uDIGIT_FITTING(v, a, b) \
+- min((unsigned int)max((unsigned)(((v) >> uSHIFT) \
+- >> max((int)(uFRACTION_BITS_FITTING(a) - (b)), 0)), \
+- uISP_VAL_MIN), uISP_VAL_MAX)
++static inline int sDIGIT_FITTING(int v, int a, int b)
++{
++ int fit_shift = sFRACTION_BITS_FITTING(a) - b;
++
++ v >>= sSHIFT;
++ v >>= fit_shift > 0 ? fit_shift : 0;
++
++ return clamp_t(int, v, sISP_VAL_MIN, sISP_VAL_MAX);
++}
++
++static inline unsigned int uDIGIT_FITTING(unsigned int v, int a, int b)
++{
++ int fit_shift = uFRACTION_BITS_FITTING(a) - b;
++
++ v >>= uSHIFT;
++ v >>= fit_shift > 0 ? fit_shift : 0;
++
++ return clamp_t(unsigned int, v, uISP_VAL_MIN, uISP_VAL_MAX);
++}
+
+ #endif /* __SH_CSS_FRAC_H */
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index a4223821188788..ee04185d8e0f58 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4148,6 +4148,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
+
+ btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
+ inode_inc_iversion(&inode->vfs_inode);
++ inode_set_ctime_current(&inode->vfs_inode);
+ inode_inc_iversion(&dir->vfs_inode);
+ inode_set_ctime_current(&inode->vfs_inode);
+ dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode);
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index cf7365581031b5..a2034511b63144 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -627,6 +627,9 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
+ prev = delegation;
+ continue;
+ }
++ inode = nfs_delegation_grab_inode(delegation);
++ if (inode == NULL)
++ continue;
+
+ if (prev) {
+ struct inode *tmp = nfs_delegation_grab_inode(prev);
+@@ -637,12 +640,6 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
+ }
+ }
+
+- inode = nfs_delegation_grab_inode(delegation);
+- if (inode == NULL) {
+- rcu_read_unlock();
+- iput(to_put);
+- goto restart;
+- }
+ delegation = nfs_start_delegation_return_locked(NFS_I(inode));
+ rcu_read_unlock();
+
+@@ -1164,7 +1161,6 @@ static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server,
+ struct inode *inode;
+ restart:
+ rcu_read_lock();
+-restart_locked:
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ if (test_bit(NFS_DELEGATION_INODE_FREEING,
+ &delegation->flags) ||
+@@ -1175,7 +1171,7 @@ static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server,
+ continue;
+ inode = nfs_delegation_grab_inode(delegation);
+ if (inode == NULL)
+- goto restart_locked;
++ continue;
+ delegation = nfs_start_delegation_return_locked(NFS_I(inode));
+ rcu_read_unlock();
+ if (delegation != NULL) {
+@@ -1296,7 +1292,6 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
+ nfs4_stateid stateid;
+ restart:
+ rcu_read_lock();
+-restart_locked:
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ if (test_bit(NFS_DELEGATION_INODE_FREEING,
+ &delegation->flags) ||
+@@ -1307,7 +1302,7 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
+ continue;
+ inode = nfs_delegation_grab_inode(delegation);
+ if (inode == NULL)
+- goto restart_locked;
++ continue;
+ spin_lock(&delegation->lock);
+ cred = get_cred_rcu(delegation->cred);
+ nfs4_stateid_copy(&stateid, &delegation->stateid);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index e7ac249df1ad6c..299ea2b86df668 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -9845,13 +9845,16 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
+ fallthrough;
+ default:
+ task->tk_status = 0;
++ lrp->res.lrs_present = 0;
+ fallthrough;
+ case 0:
+ break;
+ case -NFS4ERR_DELAY:
+- if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
+- break;
+- goto out_restart;
++ if (nfs4_async_handle_error(task, server, NULL, NULL) ==
++ -EAGAIN)
++ goto out_restart;
++ lrp->res.lrs_present = 0;
++ break;
+ }
+ return;
+ out_restart:
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 664d3128e730c0..3d1a9f8634a999 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1172,10 +1172,9 @@ void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
+ LIST_HEAD(freeme);
+
+ spin_lock(&inode->i_lock);
+- if (!pnfs_layout_is_valid(lo) ||
+- !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
++ if (!nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
+ goto out_unlock;
+- if (stateid) {
++ if (stateid && pnfs_layout_is_valid(lo)) {
+ u32 seq = be32_to_cpu(arg_stateid->seqid);
+
+ pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
+diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
+index 6322f0f68a176b..b0473c2567fe68 100644
+--- a/fs/smb/client/cifsencrypt.c
++++ b/fs/smb/client/cifsencrypt.c
+@@ -129,7 +129,7 @@ static ssize_t cifs_shash_xarray(const struct iov_iter *iter, ssize_t maxsize,
+ for (j = foffset / PAGE_SIZE; j < npages; j++) {
+ len = min_t(size_t, maxsize, PAGE_SIZE - offset);
+ p = kmap_local_page(folio_page(folio, j));
+- ret = crypto_shash_update(shash, p, len);
++ ret = crypto_shash_update(shash, p + offset, len);
+ kunmap_local(p);
+ if (ret < 0)
+ return ret;
+diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c
+index e0a6b758094fc5..d8d03070ae44b4 100644
+--- a/fs/smb/server/mgmt/share_config.c
++++ b/fs/smb/server/mgmt/share_config.c
+@@ -15,6 +15,7 @@
+ #include "share_config.h"
+ #include "user_config.h"
+ #include "user_session.h"
++#include "../connection.h"
+ #include "../transport_ipc.h"
+ #include "../misc.h"
+
+@@ -120,12 +121,13 @@ static int parse_veto_list(struct ksmbd_share_config *share,
+ return 0;
+ }
+
+-static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
++static struct ksmbd_share_config *share_config_request(struct ksmbd_work *work,
+ const char *name)
+ {
+ struct ksmbd_share_config_response *resp;
+ struct ksmbd_share_config *share = NULL;
+ struct ksmbd_share_config *lookup;
++ struct unicode_map *um = work->conn->um;
+ int ret;
+
+ resp = ksmbd_ipc_share_config_request(name);
+@@ -181,7 +183,14 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
+ KSMBD_SHARE_CONFIG_VETO_LIST(resp),
+ resp->veto_list_sz);
+ if (!ret && share->path) {
++ if (__ksmbd_override_fsids(work, share)) {
++ kill_share(share);
++ share = NULL;
++ goto out;
++ }
++
+ ret = kern_path(share->path, 0, &share->vfs_path);
++ ksmbd_revert_fsids(work);
+ if (ret) {
+ ksmbd_debug(SMB, "failed to access '%s'\n",
+ share->path);
+@@ -214,7 +223,7 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
+ return share;
+ }
+
+-struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
++struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work,
+ const char *name)
+ {
+ struct ksmbd_share_config *share;
+@@ -227,7 +236,7 @@ struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
+
+ if (share)
+ return share;
+- return share_config_request(um, name);
++ return share_config_request(work, name);
+ }
+
+ bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
+diff --git a/fs/smb/server/mgmt/share_config.h b/fs/smb/server/mgmt/share_config.h
+index 5f591751b92365..d4ac2dd4de2040 100644
+--- a/fs/smb/server/mgmt/share_config.h
++++ b/fs/smb/server/mgmt/share_config.h
+@@ -11,6 +11,8 @@
+ #include <linux/path.h>
+ #include <linux/unicode.h>
+
++struct ksmbd_work;
++
+ struct ksmbd_share_config {
+ char *name;
+ char *path;
+@@ -68,7 +70,7 @@ static inline void ksmbd_share_config_put(struct ksmbd_share_config *share)
+ __ksmbd_share_config_put(share);
+ }
+
+-struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
++struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work,
+ const char *name);
+ bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
+ const char *filename);
+diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c
+index d2c81a8a11dda1..94a52a75014a43 100644
+--- a/fs/smb/server/mgmt/tree_connect.c
++++ b/fs/smb/server/mgmt/tree_connect.c
+@@ -16,17 +16,18 @@
+ #include "user_session.h"
+
+ struct ksmbd_tree_conn_status
+-ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+- const char *share_name)
++ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name)
+ {
+ struct ksmbd_tree_conn_status status = {-ENOENT, NULL};
+ struct ksmbd_tree_connect_response *resp = NULL;
+ struct ksmbd_share_config *sc;
+ struct ksmbd_tree_connect *tree_conn = NULL;
+ struct sockaddr *peer_addr;
++ struct ksmbd_conn *conn = work->conn;
++ struct ksmbd_session *sess = work->sess;
+ int ret;
+
+- sc = ksmbd_share_config_get(conn->um, share_name);
++ sc = ksmbd_share_config_get(work, share_name);
+ if (!sc)
+ return status;
+
+@@ -61,7 +62,7 @@ ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+ struct ksmbd_share_config *new_sc;
+
+ ksmbd_share_config_del(sc);
+- new_sc = ksmbd_share_config_get(conn->um, share_name);
++ new_sc = ksmbd_share_config_get(work, share_name);
+ if (!new_sc) {
+ pr_err("Failed to update stale share config\n");
+ status.ret = -ESTALE;
+diff --git a/fs/smb/server/mgmt/tree_connect.h b/fs/smb/server/mgmt/tree_connect.h
+index 6377a70b811c89..a42cdd05104114 100644
+--- a/fs/smb/server/mgmt/tree_connect.h
++++ b/fs/smb/server/mgmt/tree_connect.h
+@@ -13,6 +13,7 @@
+ struct ksmbd_share_config;
+ struct ksmbd_user;
+ struct ksmbd_conn;
++struct ksmbd_work;
+
+ enum {
+ TREE_NEW = 0,
+@@ -50,8 +51,7 @@ static inline int test_tree_conn_flag(struct ksmbd_tree_connect *tree_conn,
+ struct ksmbd_session;
+
+ struct ksmbd_tree_conn_status
+-ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
+- const char *share_name);
++ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name);
+ void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon);
+
+ int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 458cc736286aae..c6473b08b1f358 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -1959,7 +1959,7 @@ int smb2_tree_connect(struct ksmbd_work *work)
+ ksmbd_debug(SMB, "tree connect request for tree %s treename %s\n",
+ name, treename);
+
+- status = ksmbd_tree_conn_connect(conn, sess, name);
++ status = ksmbd_tree_conn_connect(work, name);
+ if (status.ret == KSMBD_TREE_CONN_STATUS_OK)
+ rsp->hdr.Id.SyncId.TreeId = cpu_to_le32(status.tree_conn->id);
+ else
+@@ -3714,7 +3714,7 @@ int smb2_open(struct ksmbd_work *work)
+ kfree(name);
+ kfree(lc);
+
+- return 0;
++ return rc;
+ }
+
+ static int readdir_info_level_struct_sz(int info_level)
+@@ -5601,6 +5601,11 @@ int smb2_query_info(struct ksmbd_work *work)
+
+ ksmbd_debug(SMB, "GOT query info request\n");
+
++ if (ksmbd_override_fsids(work)) {
++ rc = -ENOMEM;
++ goto err_out;
++ }
++
+ switch (req->InfoType) {
+ case SMB2_O_INFO_FILE:
+ ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
+@@ -5619,6 +5624,7 @@ int smb2_query_info(struct ksmbd_work *work)
+ req->InfoType);
+ rc = -EOPNOTSUPP;
+ }
++ ksmbd_revert_fsids(work);
+
+ if (!rc) {
+ rsp->StructureSize = cpu_to_le16(9);
+@@ -5628,6 +5634,7 @@ int smb2_query_info(struct ksmbd_work *work)
+ le32_to_cpu(rsp->OutputBufferLength));
+ }
+
++err_out:
+ if (rc < 0) {
+ if (rc == -EACCES)
+ rsp->hdr.Status = STATUS_ACCESS_DENIED;
+diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
+index 474dadf6b7b8bc..13818ecb6e1b2f 100644
+--- a/fs/smb/server/smb_common.c
++++ b/fs/smb/server/smb_common.c
+@@ -732,10 +732,10 @@ bool is_asterisk(char *p)
+ return p && p[0] == '*';
+ }
+
+-int ksmbd_override_fsids(struct ksmbd_work *work)
++int __ksmbd_override_fsids(struct ksmbd_work *work,
++ struct ksmbd_share_config *share)
+ {
+ struct ksmbd_session *sess = work->sess;
+- struct ksmbd_share_config *share = work->tcon->share_conf;
+ struct cred *cred;
+ struct group_info *gi;
+ unsigned int uid;
+@@ -775,6 +775,11 @@ int ksmbd_override_fsids(struct ksmbd_work *work)
+ return 0;
+ }
+
++int ksmbd_override_fsids(struct ksmbd_work *work)
++{
++ return __ksmbd_override_fsids(work, work->tcon->share_conf);
++}
++
+ void ksmbd_revert_fsids(struct ksmbd_work *work)
+ {
+ const struct cred *cred;
+diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h
+index f1092519c0c288..4a3148b0167f54 100644
+--- a/fs/smb/server/smb_common.h
++++ b/fs/smb/server/smb_common.h
+@@ -447,6 +447,8 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn,
+ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command);
+
+ int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp);
++int __ksmbd_override_fsids(struct ksmbd_work *work,
++ struct ksmbd_share_config *share);
+ int ksmbd_override_fsids(struct ksmbd_work *work);
+ void ksmbd_revert_fsids(struct ksmbd_work *work);
+
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 3d1cd726df3471..9106771bb92f01 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -1010,7 +1010,8 @@ struct mlx5_ifc_qos_cap_bits {
+
+ u8 max_tsar_bw_share[0x20];
+
+- u8 reserved_at_100[0x20];
++ u8 nic_element_type[0x10];
++ u8 nic_tsar_type[0x10];
+
+ u8 reserved_at_120[0x3];
+ u8 log_meter_aso_granularity[0x5];
+@@ -3843,10 +3844,11 @@ enum {
+ };
+
+ enum {
+- ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0,
++ ELEMENT_TYPE_CAP_MASK_TSAR = 1 << 0,
+ ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
+ ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
+ ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
++ ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4,
+ };
+
+ struct mlx5_ifc_scheduling_context_bits {
+@@ -4546,6 +4548,12 @@ enum {
+ TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+ };
+
++enum {
++ TSAR_TYPE_CAP_MASK_DWRR = 1 << 0,
++ TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1,
++ TSAR_TYPE_CAP_MASK_ETS = 1 << 2,
++};
++
+ struct mlx5_ifc_tsar_element_bits {
+ u8 reserved_at_0[0x8];
+ u8 tsar_type[0x8];
+diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
+index 98b2e1e149f93c..5cc34216f23c32 100644
+--- a/include/linux/mlx5/port.h
++++ b/include/linux/mlx5/port.h
+@@ -115,7 +115,7 @@ enum mlx5e_ext_link_mode {
+ MLX5E_100GAUI_1_100GBASE_CR_KR = 11,
+ MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12,
+ MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13,
+- MLX5E_400GAUI_8 = 15,
++ MLX5E_400GAUI_8_400GBASE_CR8 = 15,
+ MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16,
+ MLX5E_EXT_LINK_MODES_NUMBER,
+ };
+diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
+index 4523e4e8331970..526025561df199 100644
+--- a/include/linux/nvmem-consumer.h
++++ b/include/linux/nvmem-consumer.h
+@@ -81,6 +81,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
+ struct nvmem_cell_info *info, void *buf);
+
+ const char *nvmem_dev_name(struct nvmem_device *nvmem);
++size_t nvmem_dev_size(struct nvmem_device *nvmem);
+
+ void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries,
+ size_t nentries);
+diff --git a/include/linux/property.h b/include/linux/property.h
+index 1684fca930f726..d32b8052e0863b 100644
+--- a/include/linux/property.h
++++ b/include/linux/property.h
+@@ -11,6 +11,7 @@
+ #define _LINUX_PROPERTY_H_
+
+ #include <linux/bits.h>
++#include <linux/cleanup.h>
+ #include <linux/fwnode.h>
+ #include <linux/stddef.h>
+ #include <linux/types.h>
+@@ -167,6 +168,11 @@ struct fwnode_handle *device_get_next_child_node(const struct device *dev,
+ for (child = device_get_next_child_node(dev, NULL); child; \
+ child = device_get_next_child_node(dev, child))
+
++#define device_for_each_child_node_scoped(dev, child) \
++ for (struct fwnode_handle *child __free(fwnode_handle) = \
++ device_get_next_child_node(dev, NULL); \
++ child; child = device_get_next_child_node(dev, child))
++
+ struct fwnode_handle *fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+ const char *childname);
+ struct fwnode_handle *device_get_named_child_node(const struct device *dev,
+@@ -175,6 +181,8 @@ struct fwnode_handle *device_get_named_child_node(const struct device *dev,
+ struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode);
+ void fwnode_handle_put(struct fwnode_handle *fwnode);
+
++DEFINE_FREE(fwnode_handle, struct fwnode_handle *, fwnode_handle_put(_T))
++
+ int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index);
+ int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name);
+
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index 6c395a2600e8d1..276ca543ef44d8 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -173,7 +173,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
+ break;
+ case SKB_GSO_TCPV4:
+ case SKB_GSO_TCPV6:
+- if (skb->csum_offset != offsetof(struct tcphdr, check))
++ if (skb->ip_summed == CHECKSUM_PARTIAL &&
++ skb->csum_offset != offsetof(struct tcphdr, check))
+ return -EINVAL;
+ break;
+ }
+diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
+index 5b06f67879f5fa..461b4ab60b501a 100644
+--- a/kernel/trace/trace_osnoise.c
++++ b/kernel/trace/trace_osnoise.c
+@@ -228,6 +228,11 @@ static inline struct osnoise_variables *this_cpu_osn_var(void)
+ return this_cpu_ptr(&per_cpu_osnoise_var);
+ }
+
++/*
++ * Protect the interface.
++ */
++static struct mutex interface_lock;
++
+ #ifdef CONFIG_TIMERLAT_TRACER
+ /*
+ * Runtime information for the timer mode.
+@@ -252,11 +257,6 @@ static inline struct timerlat_variables *this_cpu_tmr_var(void)
+ return this_cpu_ptr(&per_cpu_timerlat_var);
+ }
+
+-/*
+- * Protect the interface.
+- */
+-static struct mutex interface_lock;
+-
+ /*
+ * tlat_var_reset - Reset the values of the given timerlat_variables
+ */
+diff --git a/mm/memory.c b/mm/memory.c
+index bfd2273cb4b460..b6ddfe22c5d5c0 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2424,11 +2424,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
+ return 0;
+ }
+
+-/*
+- * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
+- * must have pre-validated the caching bits of the pgprot_t.
+- */
+-int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
++static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+ {
+ pgd_t *pgd;
+@@ -2481,6 +2477,27 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+ return 0;
+ }
+
++/*
++ * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
++ * must have pre-validated the caching bits of the pgprot_t.
++ */
++int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
++ unsigned long pfn, unsigned long size, pgprot_t prot)
++{
++ int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
++
++ if (!error)
++ return 0;
++
++ /*
++ * A partial pfn range mapping is dangerous: it does not
++ * maintain page reference counts, and callers may free
++ * pages due to the error. So zap it early.
++ */
++ zap_page_range_single(vma, addr, size, NULL);
++ return error;
++}
++
+ /**
+ * remap_pfn_range - remap kernel memory to userspace
+ * @vma: user vma to map to
+diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c
+index e0b8d6b17a34dc..4e0a7d038e219c 100644
+--- a/net/ipv4/fou_core.c
++++ b/net/ipv4/fou_core.c
+@@ -336,11 +336,11 @@ static struct sk_buff *gue_gro_receive(struct sock *sk,
+ struct gro_remcsum grc;
+ u8 proto;
+
++ skb_gro_remcsum_init(&grc);
++
+ if (!fou)
+ goto out;
+
+- skb_gro_remcsum_init(&grc);
+-
+ off = skb_gro_offset(skb);
+ len = off + sizeof(*guehdr);
+
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index d5902e7f47a782..953c22c0ec47ec 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -339,15 +339,21 @@ mptcp_pm_del_add_timer(struct mptcp_sock *msk,
+ {
+ struct mptcp_pm_add_entry *entry;
+ struct sock *sk = (struct sock *)msk;
++ struct timer_list *add_timer = NULL;
+
+ spin_lock_bh(&msk->pm.lock);
+ entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
+- if (entry && (!check_id || entry->addr.id == addr->id))
++ if (entry && (!check_id || entry->addr.id == addr->id)) {
+ entry->retrans_times = ADD_ADDR_RETRANS_MAX;
++ add_timer = &entry->add_timer;
++ }
++ if (!check_id && entry)
++ list_del(&entry->list);
+ spin_unlock_bh(&msk->pm.lock);
+
+- if (entry && (!check_id || entry->addr.id == addr->id))
+- sk_stop_timer_sync(sk, &entry->add_timer);
++ /* no lock, because sk_stop_timer_sync() is calling del_timer_sync() */
++ if (add_timer)
++ sk_stop_timer_sync(sk, add_timer);
+
+ return entry;
+ }
+@@ -1493,7 +1499,6 @@ static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
+
+ entry = mptcp_pm_del_add_timer(msk, addr, false);
+ if (entry) {
+- list_del(&entry->list);
+ kfree(entry);
+ return true;
+ }
+diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
+index f30163e2ca6207..765ffd6e06bc41 100644
+--- a/net/netfilter/nft_socket.c
++++ b/net/netfilter/nft_socket.c
+@@ -110,13 +110,13 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ *dest = READ_ONCE(sk->sk_mark);
+ } else {
+ regs->verdict.code = NFT_BREAK;
+- return;
++ goto out_put_sk;
+ }
+ break;
+ case NFT_SOCKET_WILDCARD:
+ if (!sk_fullsock(sk)) {
+ regs->verdict.code = NFT_BREAK;
+- return;
++ goto out_put_sk;
+ }
+ nft_socket_wildcard(pkt, regs, sk, dest);
+ break;
+@@ -124,7 +124,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ case NFT_SOCKET_CGROUPV2:
+ if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) {
+ regs->verdict.code = NFT_BREAK;
+- return;
++ goto out_put_sk;
+ }
+ break;
+ #endif
+@@ -133,6 +133,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
+ regs->verdict.code = NFT_BREAK;
+ }
+
++out_put_sk:
+ if (sk != skb->sk)
+ sock_gen_put(sk);
+ }
+diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
+index 902eb429b9dbd9..0b7952471c18f6 100755
+--- a/scripts/kconfig/merge_config.sh
++++ b/scripts/kconfig/merge_config.sh
+@@ -167,6 +167,8 @@ for ORIG_MERGE_FILE in $MERGE_LIST ; do
+ sed -i "/$CFG[ =]/d" $MERGE_FILE
+ fi
+ done
++ # In case the previous file lacks a new line at the end
++ echo >> $TMP_FILE
+ cat $MERGE_FILE >> $TMP_FILE
+ done
+
+diff --git a/sound/soc/codecs/peb2466.c b/sound/soc/codecs/peb2466.c
+index 5dec69be0acb2e..06c83d2042f3e5 100644
+--- a/sound/soc/codecs/peb2466.c
++++ b/sound/soc/codecs/peb2466.c
+@@ -229,7 +229,8 @@ static int peb2466_reg_read(void *context, unsigned int reg, unsigned int *val)
+ case PEB2466_CMD_XOP:
+ case PEB2466_CMD_SOP:
+ ret = peb2466_read_byte(peb2466, reg, &tmp);
+- *val = tmp;
++ if (!ret)
++ *val = tmp;
+ break;
+ default:
+ dev_err(&peb2466->spi->dev, "Not a XOP or SOP command\n");
+diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
+index b6f5b4572012da..44175b1b14a295 100644
+--- a/sound/soc/meson/axg-card.c
++++ b/sound/soc/meson/axg-card.c
+@@ -104,7 +104,7 @@ static int axg_card_add_tdm_loopback(struct snd_soc_card *card,
+ int *index)
+ {
+ struct meson_card *priv = snd_soc_card_get_drvdata(card);
+- struct snd_soc_dai_link *pad = &card->dai_link[*index];
++ struct snd_soc_dai_link *pad;
+ struct snd_soc_dai_link *lb;
+ struct snd_soc_dai_link_component *dlc;
+ int ret;
+@@ -114,6 +114,7 @@ static int axg_card_add_tdm_loopback(struct snd_soc_card *card,
+ if (ret)
+ return ret;
+
++ pad = &card->dai_link[*index];
+ lb = &card->dai_link[*index + 1];
+
+ lb->name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-lb", pad->name);
+diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+index 8df8cbb447f10f..84d59419e4eb5b 100644
+--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
++++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+@@ -1841,7 +1841,7 @@ static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd,
+ if (err)
+ return;
+
+- if (socketpair(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0, sfd))
++ if (socketpair(AF_UNIX, type | SOCK_NONBLOCK, 0, sfd))
+ goto close_cli0;
+ c1 = sfd[0], p1 = sfd[1];
+
+@@ -1876,7 +1876,6 @@ static void unix_inet_redir_to_connected(int family, int type, int sock_mapfd,
+ close_cli0:
+ xclose(c0);
+ xclose(p0);
+-
+ }
+
+ static void unix_inet_skb_redir_to_connected(struct test_sockmap_listen *skel,
+diff --git a/tools/testing/selftests/net/csum.c b/tools/testing/selftests/net/csum.c
+index 90eb06fefa59ec..eef72b50270c5d 100644
+--- a/tools/testing/selftests/net/csum.c
++++ b/tools/testing/selftests/net/csum.c
+@@ -654,10 +654,16 @@ static int recv_verify_packet_ipv4(void *nh, int len)
+ {
+ struct iphdr *iph = nh;
+ uint16_t proto = cfg_encap ? IPPROTO_UDP : cfg_proto;
++ uint16_t ip_len;
+
+ if (len < sizeof(*iph) || iph->protocol != proto)
+ return -1;
+
++ ip_len = ntohs(iph->tot_len);
++ if (ip_len > len || ip_len < sizeof(*iph))
++ return -1;
++
++ len = ip_len;
+ iph_addr_p = &iph->saddr;
+ if (proto == IPPROTO_TCP)
+ return recv_verify_packet_tcp(iph + 1, len - sizeof(*iph));
+@@ -669,16 +675,22 @@ static int recv_verify_packet_ipv6(void *nh, int len)
+ {
+ struct ipv6hdr *ip6h = nh;
+ uint16_t proto = cfg_encap ? IPPROTO_UDP : cfg_proto;
++ uint16_t ip_len;
+
+ if (len < sizeof(*ip6h) || ip6h->nexthdr != proto)
+ return -1;
+
++ ip_len = ntohs(ip6h->payload_len);
++ if (ip_len > len - sizeof(*ip6h))
++ return -1;
++
++ len = ip_len;
+ iph_addr_p = &ip6h->saddr;
+
+ if (proto == IPPROTO_TCP)
+- return recv_verify_packet_tcp(ip6h + 1, len - sizeof(*ip6h));
++ return recv_verify_packet_tcp(ip6h + 1, len);
+ else
+- return recv_verify_packet_udp(ip6h + 1, len - sizeof(*ip6h));
++ return recv_verify_packet_udp(ip6h + 1, len);
+ }
+
+ /* return whether auxdata includes TP_STATUS_CSUM_VALID */
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index 6faff03acc110b..3c286fba8d5dc5 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3222,7 +3222,9 @@ fullmesh_tests()
+ pm_nl_set_limits $ns1 1 3
+ pm_nl_set_limits $ns2 1 3
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+- pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,fullmesh
++ if mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then
++ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,fullmesh
++ fi
+ fullmesh=1 speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 3 3 3