From: "Mike Pagano" <mpagano@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/linux-patches:6.12 commit in: /
Date: Thu, 09 Jan 2025 13:51:47 +0000 (UTC)
Message-ID: <1736430684.dce11bba7397f8cff2d315b9195b222824bbeed4.mpagano@gentoo>
commit: dce11bba7397f8cff2d315b9195b222824bbeed4
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jan 9 13:51:24 2025 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jan 9 13:51:24 2025 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dce11bba
Linux patch 6.12.9
Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
0000_README | 4 +
1008_linux-6.12.9.patch | 6461 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 6465 insertions(+)
diff --git a/0000_README b/0000_README
index 483a9fde..29d9187b 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-6.12.8.patch
From: https://www.kernel.org
Desc: Linux 6.12.8
+Patch: 1008_linux-6.12.9.patch
+From: https://www.kernel.org
+Desc: Linux 6.12.9
+
Patch: 1510_fs-enable-link-security-restrictions-by-default.patch
From: http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
Desc: Enable link security restrictions by default.
diff --git a/1008_linux-6.12.9.patch b/1008_linux-6.12.9.patch
new file mode 100644
index 00000000..9db1b6b3
--- /dev/null
+++ b/1008_linux-6.12.9.patch
@@ -0,0 +1,6461 @@
+diff --git a/Documentation/admin-guide/laptops/thinkpad-acpi.rst b/Documentation/admin-guide/laptops/thinkpad-acpi.rst
+index 7f674a6cfa8a7b..4ab0fef7d440d1 100644
+--- a/Documentation/admin-guide/laptops/thinkpad-acpi.rst
++++ b/Documentation/admin-guide/laptops/thinkpad-acpi.rst
+@@ -445,8 +445,10 @@ event code Key Notes
+ 0x1008 0x07 FN+F8 IBM: toggle screen expand
+ Lenovo: configure UltraNav,
+ or toggle screen expand.
+- On newer platforms (2024+)
+- replaced by 0x131f (see below)
++ On 2024 platforms replaced by
++ 0x131f (see below) and on newer
++ platforms (2025 +) keycode is
++ replaced by 0x1401 (see below).
+
+ 0x1009 0x08 FN+F9 -
+
+@@ -506,9 +508,11 @@ event code Key Notes
+
+ 0x1019 0x18 unknown
+
+-0x131f ... FN+F8 Platform Mode change.
++0x131f ... FN+F8 Platform Mode change (2024 systems).
+ Implemented in driver.
+
++0x1401 ... FN+F8 Platform Mode change (2025 + systems).
++ Implemented in driver.
+ ... ... ...
+
+ 0x1020 0x1F unknown
+diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
+index df20a3c9c74479..ec89115c74e4d3 100644
+--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
++++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
+@@ -90,7 +90,7 @@ properties:
+ adi,dsi-lanes:
+ description: Number of DSI data lanes connected to the DSI host.
+ $ref: /schemas/types.yaml#/definitions/uint32
+- enum: [ 1, 2, 3, 4 ]
++ enum: [ 2, 3, 4 ]
+
+ "#sound-dai-cells":
+ const: 0
+diff --git a/Makefile b/Makefile
+index 8a10105c2539cf..80151f53d8ee0f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 12
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Baby Opossum Posse
+
+diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
+index 5b248814204147..69c6e71fa1e6ba 100644
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -297,7 +297,6 @@ config ARC_PAGE_SIZE_16K
+ config ARC_PAGE_SIZE_4K
+ bool "4KB"
+ select HAVE_PAGE_SIZE_4KB
+- depends on ARC_MMU_V3 || ARC_MMU_V4
+
+ endchoice
+
+@@ -474,7 +473,8 @@ config HIGHMEM
+
+ config ARC_HAS_PAE40
+ bool "Support for the 40-bit Physical Address Extension"
+- depends on ISA_ARCV2
++ depends on MMU_V4
++ depends on !ARC_PAGE_SIZE_4K
+ select HIGHMEM
+ select PHYS_ADDR_T_64BIT
+ help
+diff --git a/arch/arc/Makefile b/arch/arc/Makefile
+index 2390dd042e3636..fb98478ed1ab09 100644
+--- a/arch/arc/Makefile
++++ b/arch/arc/Makefile
+@@ -6,7 +6,7 @@
+ KBUILD_DEFCONFIG := haps_hs_smp_defconfig
+
+ ifeq ($(CROSS_COMPILE),)
+-CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-)
++CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux- arc-linux-gnu-)
+ endif
+
+ cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
+diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
+index 58045c89834045..76f43db0890fcd 100644
+--- a/arch/arc/include/asm/cmpxchg.h
++++ b/arch/arc/include/asm/cmpxchg.h
+@@ -48,7 +48,7 @@
+ \
+ switch(sizeof((_p_))) { \
+ case 1: \
+- _prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)_p_, (uintptr_t)_o_, (uintptr_t)_n_); \
++ _prev_ = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *__force)_p_, (uintptr_t)_o_, (uintptr_t)_n_); \
+ break; \
+ case 4: \
+ _prev_ = __cmpxchg(_p_, _o_, _n_); \
+diff --git a/arch/arc/net/bpf_jit_arcv2.c b/arch/arc/net/bpf_jit_arcv2.c
+index 4458e409ca0a84..6d989b6d88c69b 100644
+--- a/arch/arc/net/bpf_jit_arcv2.c
++++ b/arch/arc/net/bpf_jit_arcv2.c
+@@ -2916,7 +2916,7 @@ bool check_jmp_32(u32 curr_off, u32 targ_off, u8 cond)
+ addendum = (cond == ARC_CC_AL) ? 0 : INSN_len_normal;
+ disp = get_displacement(curr_off + addendum, targ_off);
+
+- if (ARC_CC_AL)
++ if (cond == ARC_CC_AL)
+ return is_valid_far_disp(disp);
+ else
+ return is_valid_near_disp(disp);
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 28b4312f25631c..f558be868a50b6 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -7067,6 +7067,7 @@ __init int intel_pmu_init(void)
+
+ case INTEL_METEORLAKE:
+ case INTEL_METEORLAKE_L:
++ case INTEL_ARROWLAKE_U:
+ intel_pmu_init_hybrid(hybrid_big_small);
+
+ x86_pmu.pebs_latency_data = cmt_latency_data;
+diff --git a/block/blk.h b/block/blk.h
+index 88fab6a81701ed..1426f9c281973e 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -469,11 +469,6 @@ static inline bool bio_zone_write_plugging(struct bio *bio)
+ {
+ return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
+ }
+-static inline bool bio_is_zone_append(struct bio *bio)
+-{
+- return bio_op(bio) == REQ_OP_ZONE_APPEND ||
+- bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
+-}
+ void blk_zone_write_plug_bio_merged(struct bio *bio);
+ void blk_zone_write_plug_init_request(struct request *rq);
+ static inline void blk_zone_update_request_bio(struct request *rq,
+@@ -522,10 +517,6 @@ static inline bool bio_zone_write_plugging(struct bio *bio)
+ {
+ return false;
+ }
+-static inline bool bio_is_zone_append(struct bio *bio)
+-{
+- return false;
+-}
+ static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
+ {
+ }
+diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
+index b2cb157703c57f..c409fc7e061869 100644
+--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
++++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
+@@ -278,7 +278,8 @@ static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
+
+ #else /* !CONFIG_RESET_CONTROLLER */
+
+-static int clk_imx8mp_audiomix_reset_controller_register(struct clk_imx8mp_audiomix_priv *priv)
++static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
++ struct clk_imx8mp_audiomix_priv *priv)
+ {
+ return 0;
+ }
+diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
+index 17e32ae08720cb..1015fab9525157 100644
+--- a/drivers/clk/thead/clk-th1520-ap.c
++++ b/drivers/clk/thead/clk-th1520-ap.c
+@@ -779,6 +779,13 @@ static struct ccu_div dpu1_clk = {
+ },
+ };
+
++static CLK_FIXED_FACTOR_HW(emmc_sdio_ref_clk, "emmc-sdio-ref",
++ &video_pll_clk.common.hw, 4, 1, 0);
++
++static const struct clk_parent_data emmc_sdio_ref_clk_pd[] = {
++ { .hw = &emmc_sdio_ref_clk.hw },
++};
++
+ static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, BIT(4), 0);
+ static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, BIT(5), 0);
+ static CCU_GATE(CLK_AON2CPU_A2X, aon2cpu_a2x_clk, "aon2cpu-a2x", axi4_cpusys2_aclk_pd,
+@@ -798,7 +805,7 @@ static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", p
+ 0x150, BIT(12), 0);
+ static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
+ static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, BIT(13), 0);
+-static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", video_pll_clk_pd, 0x204, BIT(30), 0);
++static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", emmc_sdio_ref_clk_pd, 0x204, BIT(30), 0);
+ static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, BIT(26), 0);
+ static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, BIT(24), 0);
+ static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, BIT(23), 0);
+@@ -1059,6 +1066,10 @@ static int th1520_clk_probe(struct platform_device *pdev)
+ return ret;
+ priv->hws[CLK_PLL_GMAC_100M] = &gmac_pll_clk_100m.hw;
+
++ ret = devm_clk_hw_register(dev, &emmc_sdio_ref_clk.hw);
++ if (ret)
++ return ret;
++
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, priv);
+ if (ret)
+ return ret;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 51904906545e59..45e28726e148e9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3721,8 +3721,12 @@ static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
+ continue;
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+- if (r)
++ if (r) {
++ DRM_ERROR("resume of IP block <%s> failed %d\n",
++ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
++ }
++ adev->ip_blocks[i].status.hw = true;
+ }
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+index c100845409f794..ffdb966c4127ee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+@@ -45,6 +45,8 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
+ MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
+ MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
+ MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
++MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin");
++MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");
+
+ #define GFX9_MEC_HPD_SIZE 4096
+ #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
+@@ -574,8 +576,12 @@ static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
+ {
+ int err;
+
+- err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
+- "amdgpu/%s_mec.bin", chip_name);
++ if (amdgpu_sriov_vf(adev))
++ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
++ "amdgpu/%s_sjt_mec.bin", chip_name);
++ else
++ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
++ "amdgpu/%s_mec.bin", chip_name);
+ if (err)
+ goto out;
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+index 8ee3d07ffbdfa2..f31e9fbf634a0f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+@@ -306,7 +306,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
+ spage = migrate_pfn_to_page(migrate->src[i]);
+ if (spage && !is_zone_device_page(spage)) {
+ src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
+- DMA_TO_DEVICE);
++ DMA_BIDIRECTIONAL);
+ r = dma_mapping_error(dev, src[i]);
+ if (r) {
+ dev_err(dev, "%s: fail %d dma_map_page\n",
+@@ -630,7 +630,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
+ goto out_oom;
+ }
+
+- dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
++ dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ r = dma_mapping_error(dev, dst[i]);
+ if (r) {
+ dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+index 61f4a38e7d2bf6..8f786592143b6c 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+@@ -153,7 +153,16 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
+ ADV7511_AUDIO_CFG3_LEN_MASK, len);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
+ ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
+- regmap_write(adv7511->regmap, 0x73, 0x1);
++
++ /* send current Audio infoframe values while updating */
++ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
++ BIT(5), BIT(5));
++
++ regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1);
++
++ /* use Audio infoframe updated info */
++ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
++ BIT(5), 0);
+
+ return 0;
+ }
+@@ -184,8 +193,9 @@ static int audio_startup(struct device *dev, void *data)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
+ BIT(7) | BIT(6), BIT(7));
+ /* use Audio infoframe updated info */
+- regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
++ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), 0);
++
+ /* enable SPDIF receiver */
+ if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+index eb5919b382635e..a13b3d8ab6ac60 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+@@ -1241,8 +1241,10 @@ static int adv7511_probe(struct i2c_client *i2c)
+ return ret;
+
+ ret = adv7511_init_regulators(adv7511);
+- if (ret)
+- return dev_err_probe(dev, ret, "failed to init regulators\n");
++ if (ret) {
++ dev_err_probe(dev, ret, "failed to init regulators\n");
++ goto err_of_node_put;
++ }
+
+ /*
+ * The power down GPIO is optional. If present, toggle it from active to
+@@ -1363,6 +1365,8 @@ static int adv7511_probe(struct i2c_client *i2c)
+ i2c_unregister_device(adv7511->i2c_edid);
+ uninit_regulators:
+ adv7511_uninit_regulators(adv7511);
++err_of_node_put:
++ of_node_put(adv7511->host_node);
+
+ return ret;
+ }
+@@ -1371,6 +1375,8 @@ static void adv7511_remove(struct i2c_client *i2c)
+ {
+ struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
++ of_node_put(adv7511->host_node);
++
+ adv7511_uninit_regulators(adv7511);
+
+ drm_bridge_remove(&adv7511->bridge);
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+index 4481489aaf5ebf..122ad91e8a3293 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+@@ -172,7 +172,7 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+
+ of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
+
+- if (num_lanes < 1 || num_lanes > 4)
++ if (num_lanes < 2 || num_lanes > 4)
+ return -EINVAL;
+
+ adv->num_dsi_lanes = num_lanes;
+@@ -181,8 +181,6 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+ if (!adv->host_node)
+ return -ENODEV;
+
+- of_node_put(adv->host_node);
+-
+ adv->use_timing_gen = !of_property_read_bool(np,
+ "adi,disable-timing-generator");
+
+diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+index 4a6c3040ca15ef..f11309efff3398 100644
+--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
++++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+@@ -2084,14 +2084,6 @@ static void intel_c10_pll_program(struct drm_i915_private *i915,
+ 0, C10_VDR_CTRL_MSGBUS_ACCESS,
+ MB_WRITE_COMMITTED);
+
+- /* Custom width needs to be programmed to 0 for both the phy lanes */
+- intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
+- C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
+- MB_WRITE_COMMITTED);
+- intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CONTROL(1),
+- 0, C10_VDR_CTRL_UPDATE_CFG,
+- MB_WRITE_COMMITTED);
+-
+ /* Program the pll values only for the master lane */
+ for (i = 0; i < ARRAY_SIZE(pll_state->pll); i++)
+ intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_PLL(i),
+@@ -2101,6 +2093,10 @@ static void intel_c10_pll_program(struct drm_i915_private *i915,
+ intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CMN(0), pll_state->cmn, MB_WRITE_COMMITTED);
+ intel_cx0_write(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_TX(0), pll_state->tx, MB_WRITE_COMMITTED);
+
++ /* Custom width needs to be programmed to 0 for both the phy lanes */
++ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C10_VDR_CUSTOM_WIDTH,
++ C10_VDR_CUSTOM_WIDTH_MASK, C10_VDR_CUSTOM_WIDTH_8_10,
++ MB_WRITE_COMMITTED);
+ intel_cx0_rmw(encoder, INTEL_CX0_LANE0, PHY_C10_VDR_CONTROL(1),
+ 0, C10_VDR_CTRL_MASTER_LANE | C10_VDR_CTRL_UPDATE_CFG,
+ MB_WRITE_COMMITTED);
+diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
+index c864d101faf941..9378d5901c4939 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
++++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
+@@ -133,7 +133,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE;
+
+- if (GRAPHICS_VER(gt->i915) >= 12) {
++ if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
+ for (i = 0; i < I915_MAX_VCS; i++)
+ if (HAS_ENGINE(gt, _VCS(i)))
+ pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
+diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
+index 2a093540354e89..84e327b569252f 100644
+--- a/drivers/gpu/drm/xe/xe_bo.c
++++ b/drivers/gpu/drm/xe/xe_bo.c
+@@ -722,7 +722,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
+ new_mem->mem_type == XE_PL_SYSTEM) {
+ long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP,
+- true,
++ false,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0) {
+ ret = timeout;
+@@ -846,8 +846,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
+
+ out:
+ if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
+- ttm_bo->ttm)
++ ttm_bo->ttm) {
++ long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
++ DMA_RESV_USAGE_KERNEL,
++ false,
++ MAX_SCHEDULE_TIMEOUT);
++ if (timeout < 0)
++ ret = timeout;
++
+ xe_tt_unmap_sg(ttm_bo->ttm);
++ }
+
+ return ret;
+ }
+diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
+index c18e463092afa5..85aa3ab0da3b87 100644
+--- a/drivers/gpu/drm/xe/xe_devcoredump.c
++++ b/drivers/gpu/drm/xe/xe_devcoredump.c
+@@ -104,7 +104,11 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
+ drm_puts(&p, "\n**** GuC CT ****\n");
+ xe_guc_ct_snapshot_print(ss->ct, &p);
+
+- drm_puts(&p, "\n**** Contexts ****\n");
++ /*
++ * Don't add a new section header here because the mesa debug decoder
++ * tool expects the context information to be in the 'GuC CT' section.
++ */
++ /* drm_puts(&p, "\n**** Contexts ****\n"); */
+ xe_guc_exec_queue_snapshot_print(ss->ge, &p);
+
+ drm_puts(&p, "\n**** Job ****\n");
+@@ -358,6 +362,15 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+ char buff[ASCII85_BUFSZ], *line_buff;
+ size_t line_pos = 0;
+
++ /*
++ * Splitting blobs across multiple lines is not compatible with the mesa
++ * debug decoder tool. Note that even dropping the explicit '\n' below
++ * doesn't help because the GuC log is so big some underlying implementation
++ * still splits the lines at 512K characters. So just bail completely for
++ * the moment.
++ */
++ return;
++
+ #define DMESG_MAX_LINE_LEN 800
+ #define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "\n\0" */
+
+diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
+index fd0f3b3c9101d4..268cd3123be9d9 100644
+--- a/drivers/gpu/drm/xe/xe_exec_queue.c
++++ b/drivers/gpu/drm/xe/xe_exec_queue.c
+@@ -8,6 +8,7 @@
+ #include <linux/nospec.h>
+
+ #include <drm/drm_device.h>
++#include <drm/drm_drv.h>
+ #include <drm/drm_file.h>
+ #include <uapi/drm/xe_drm.h>
+
+@@ -762,9 +763,11 @@ bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
+ */
+ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
+ {
++ struct xe_device *xe = gt_to_xe(q->gt);
+ struct xe_file *xef;
+ struct xe_lrc *lrc;
+ u32 old_ts, new_ts;
++ int idx;
+
+ /*
+ * Jobs that are run during driver load may use an exec_queue, but are
+@@ -774,6 +777,10 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
+ if (!q->vm || !q->vm->xef)
+ return;
+
++ /* Synchronize with unbind while holding the xe file open */
++ if (!drm_dev_enter(&xe->drm, &idx))
++ return;
++
+ xef = q->vm->xef;
+
+ /*
+@@ -787,6 +794,8 @@ void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
+ lrc = q->lrc[0];
+ new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
+ xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;
++
++ drm_dev_exit(idx);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+index afdb477ecf833d..c9ed996b9cb0c3 100644
+--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
++++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+@@ -2038,7 +2038,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
+ valid_any = valid_any || (valid_ggtt && is_primary);
+
+ if (IS_DGFX(xe)) {
+- bool valid_lmem = pf_get_vf_config_ggtt(primary_gt, vfid);
++ bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
+
+ valid_any = valid_any || (valid_lmem && is_primary);
+ valid_all = valid_all && valid_lmem;
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 64ace0b968f07f..91db10515d7472 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -690,6 +690,7 @@ cma_validate_port(struct ib_device *device, u32 port,
+ int bound_if_index = dev_addr->bound_dev_if;
+ int dev_type = dev_addr->dev_type;
+ struct net_device *ndev = NULL;
++ struct net_device *pdev = NULL;
+
+ if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
+ goto out;
+@@ -714,6 +715,21 @@ cma_validate_port(struct ib_device *device, u32 port,
+
+ rcu_read_lock();
+ ndev = rcu_dereference(sgid_attr->ndev);
++ if (ndev->ifindex != bound_if_index) {
++ pdev = dev_get_by_index_rcu(dev_addr->net, bound_if_index);
++ if (pdev) {
++ if (is_vlan_dev(pdev)) {
++ pdev = vlan_dev_real_dev(pdev);
++ if (ndev->ifindex == pdev->ifindex)
++ bound_if_index = pdev->ifindex;
++ }
++ if (is_vlan_dev(ndev)) {
++ pdev = vlan_dev_real_dev(ndev);
++ if (bound_if_index == pdev->ifindex)
++ bound_if_index = ndev->ifindex;
++ }
++ }
++ }
+ if (!net_eq(dev_net(ndev), dev_addr->net) ||
+ ndev->ifindex != bound_if_index) {
+ rdma_put_gid_attr(sgid_attr);
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index 7dc8e2ec62cc8b..f121899863034a 100644
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -2802,8 +2802,8 @@ int rdma_nl_notify_event(struct ib_device *device, u32 port_num,
+ enum rdma_nl_notify_event_type type)
+ {
+ struct sk_buff *skb;
++ int ret = -EMSGSIZE;
+ struct net *net;
+- int ret = 0;
+ void *nlh;
+
+ net = read_pnet(&device->coredev.rdma_net);
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index a4cce360df2178..edef79daed3fa8 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -161,7 +161,7 @@ static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
+ {
+ const void __user *res = iter->cur;
+
+- if (iter->cur + len > iter->end)
++ if (len > iter->end - iter->cur)
+ return (void __force __user *)ERR_PTR(-ENOSPC);
+ iter->cur += len;
+ return res;
+@@ -2010,11 +2010,13 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
+ ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
+- wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
++ wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
++ cmd.wr_count));
+ if (IS_ERR(wqes))
+ return PTR_ERR(wqes);
+- sgls = uverbs_request_next_ptr(
+- &iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
++ sgls = uverbs_request_next_ptr(&iter,
++ size_mul(cmd.sge_count,
++ sizeof(struct ib_uverbs_sge)));
+ if (IS_ERR(sgls))
+ return PTR_ERR(sgls);
+ ret = uverbs_request_finish(&iter);
+@@ -2200,11 +2202,11 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
+ if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
+ return ERR_PTR(-EINVAL);
+
+- wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
++ wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
+ if (IS_ERR(wqes))
+ return ERR_CAST(wqes);
+- sgls = uverbs_request_next_ptr(
+- iter, sge_count * sizeof(struct ib_uverbs_sge));
++ sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
++ sizeof(struct ib_uverbs_sge)));
+ if (IS_ERR(sgls))
+ return ERR_CAST(sgls);
+ ret = uverbs_request_finish(iter);
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 160096792224b1..b20cffcc3e7d2d 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -156,7 +156,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
+
+ ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
+ ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
+- ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
++ ib_attr->hw_ver = rdev->en_dev->pdev->revision;
+ ib_attr->max_qp = dev_attr->max_qp;
+ ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
+ ib_attr->device_cap_flags =
+@@ -2107,18 +2107,20 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
+ }
+ }
+
+- if (qp_attr_mask & IB_QP_PATH_MTU) {
+- qp->qplib_qp.modify_flags |=
+- CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+- qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
+- qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
+- } else if (qp_attr->qp_state == IB_QPS_RTR) {
+- qp->qplib_qp.modify_flags |=
+- CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+- qp->qplib_qp.path_mtu =
+- __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
+- qp->qplib_qp.mtu =
+- ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
++ if (qp_attr->qp_state == IB_QPS_RTR) {
++ enum ib_mtu qpmtu;
++
++ qpmtu = iboe_get_mtu(rdev->netdev->mtu);
++ if (qp_attr_mask & IB_QP_PATH_MTU) {
++ if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
++ ib_mtu_enum_to_int(qpmtu))
++ return -EINVAL;
++ qpmtu = qp_attr->path_mtu;
++ }
++
++ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
++ qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
++ qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
+ }
+
+ if (qp_attr_mask & IB_QP_TIMEOUT) {
+@@ -2763,7 +2765,8 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
+ wr = wr->next;
+ }
+ bnxt_qplib_post_send_db(&qp->qplib_qp);
+- bnxt_ud_qp_hw_stall_workaround(qp);
++ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
++ bnxt_ud_qp_hw_stall_workaround(qp);
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+ return rc;
+ }
+@@ -2875,7 +2878,8 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
+ wr = wr->next;
+ }
+ bnxt_qplib_post_send_db(&qp->qplib_qp);
+- bnxt_ud_qp_hw_stall_workaround(qp);
++ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
++ bnxt_ud_qp_hw_stall_workaround(qp);
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+
+ return rc;
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 2ac8ddbed576f5..8abd1b723f8ff5 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -1435,11 +1435,8 @@ static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
+
+ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
+ {
+- int mask = IB_QP_STATE;
+- struct ib_qp_attr qp_attr;
+ struct bnxt_re_qp *qp;
+
+- qp_attr.qp_state = IB_QPS_ERR;
+ mutex_lock(&rdev->qp_lock);
+ list_for_each_entry(qp, &rdev->qp_list, list) {
+ /* Modify the state of all QPs except QP1/Shadow QP */
+@@ -1447,12 +1444,9 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
+ if (qp->qplib_qp.state !=
+ CMDQ_MODIFY_QP_NEW_STATE_RESET &&
+ qp->qplib_qp.state !=
+- CMDQ_MODIFY_QP_NEW_STATE_ERR) {
++ CMDQ_MODIFY_QP_NEW_STATE_ERR)
+ bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
+ 1, IB_EVENT_QP_FATAL);
+- bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, mask,
+- NULL);
+- }
+ }
+ }
+ mutex_unlock(&rdev->qp_lock);
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 7ad83566ab0f41..828e2f9808012b 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -658,13 +658,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
+ if (rc)
+ return rc;
+-
+- srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+- GFP_KERNEL);
+- if (!srq->swq) {
+- rc = -ENOMEM;
+- goto fail;
+- }
+ srq->dbinfo.flags = 0;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_SRQ,
+@@ -693,9 +686,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ spin_lock_init(&srq->lock);
+ srq->start_idx = 0;
+ srq->last_idx = srq->hwq.max_elements - 1;
+- for (idx = 0; idx < srq->hwq.max_elements; idx++)
+- srq->swq[idx].next_idx = idx + 1;
+- srq->swq[srq->last_idx].next_idx = -1;
++ if (!srq->hwq.is_user) {
++ srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
++ GFP_KERNEL);
++ if (!srq->swq) {
++ rc = -ENOMEM;
++ goto fail;
++ }
++ for (idx = 0; idx < srq->hwq.max_elements; idx++)
++ srq->swq[idx].next_idx = idx + 1;
++ srq->swq[srq->last_idx].next_idx = -1;
++ }
+
+ srq->id = le32_to_cpu(resp.xid);
+ srq->dbinfo.hwq = &srq->hwq;
+@@ -999,9 +1000,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ u32 tbl_indx;
+ u16 nsge;
+
+- if (res->dattr)
+- qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
+-
++ qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
+ sq->dbinfo.flags = 0;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_QP,
+@@ -1033,7 +1032,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ : 0;
+ /* Update msn tbl size */
+ if (qp->is_host_msn_tbl && psn_sz) {
+- hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
++ if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
++ hwq_attr.aux_depth =
++ roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
++ else
++ hwq_attr.aux_depth =
++ roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
+ qp->msn_tbl_sz = hwq_attr.aux_depth;
+ qp->msn = 0;
+ }
+@@ -1043,13 +1047,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ if (rc)
+ return rc;
+
+- rc = bnxt_qplib_alloc_init_swq(sq);
+- if (rc)
+- goto fail_sq;
+-
+- if (psn_sz)
+- bnxt_qplib_init_psn_ptr(qp, psn_sz);
++ if (!sq->hwq.is_user) {
++ rc = bnxt_qplib_alloc_init_swq(sq);
++ if (rc)
++ goto fail_sq;
+
++ if (psn_sz)
++ bnxt_qplib_init_psn_ptr(qp, psn_sz);
++ }
+ req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+ pbl = &sq->hwq.pbl[PBL_LVL_0];
+ req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+@@ -1075,9 +1080,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
+ if (rc)
+ goto sq_swq;
+- rc = bnxt_qplib_alloc_init_swq(rq);
+- if (rc)
+- goto fail_rq;
++ if (!rq->hwq.is_user) {
++ rc = bnxt_qplib_alloc_init_swq(rq);
++ if (rc)
++ goto fail_rq;
++ }
+
+ req.rq_size = cpu_to_le32(rq->max_wqe);
+ pbl = &rq->hwq.pbl[PBL_LVL_0];
+@@ -1173,9 +1180,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ rq->dbinfo.db = qp->dpi->dbr;
+ rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
+ }
++ spin_lock_bh(&rcfw->tbl_lock);
+ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+ rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+ rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
++ spin_unlock_bh(&rcfw->tbl_lock);
+
+ return 0;
+ fail:
+@@ -2596,10 +2605,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
+ bnxt_qplib_add_flush_qp(qp);
+ } else {
+ /* Before we complete, do WA 9060 */
+- if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
+- cqe_sq_cons)) {
+- *lib_qp = qp;
+- goto out;
++ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
++ if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
++ cqe_sq_cons)) {
++ *lib_qp = qp;
++ goto out;
++ }
+ }
+ if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
+ cqe->status = CQ_REQ_STATUS_OK;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index f55958e5fddb4a..d8c71c024613bf 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -114,7 +114,6 @@ struct bnxt_qplib_sge {
+ u32 size;
+ };
+
+-#define BNXT_QPLIB_QP_MAX_SGL 6
+ struct bnxt_qplib_swq {
+ u64 wr_id;
+ int next_idx;
+@@ -154,7 +153,7 @@ struct bnxt_qplib_swqe {
+ #define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE BIT(2)
+ #define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT BIT(3)
+ #define BNXT_QPLIB_SWQE_FLAGS_INLINE BIT(4)
+- struct bnxt_qplib_sge sg_list[BNXT_QPLIB_QP_MAX_SGL];
++ struct bnxt_qplib_sge sg_list[BNXT_VAR_MAX_SGE];
+ int num_sge;
+ /* Max inline data is 96 bytes */
+ u32 inline_len;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+index e82bd37158ad6c..7a099580ca8bff 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+@@ -424,7 +424,8 @@ static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw,
+
+ /* Prevent posting if f/w is not in a state to process */
+ if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
+- return bnxt_qplib_map_rc(opcode);
++ return -ENXIO;
++
+ if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
+ return -ETIMEDOUT;
+
+@@ -493,7 +494,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+
+ rc = __send_message_basic_sanity(rcfw, msg, opcode);
+ if (rc)
+- return rc;
++ return rc == -ENXIO ? bnxt_qplib_map_rc(opcode) : rc;
+
+ rc = __send_message(rcfw, msg, opcode);
+ if (rc)
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+index e29fbbdab9fd68..3cca7b1395f6a7 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+@@ -129,12 +129,18 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+ attr->max_qp_init_rd_atom =
+ sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
+ BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
+- attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
+- /*
+- * 128 WQEs needs to be reserved for the HW (8916). Prevent
+- * reporting the max number
+- */
+- attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
++ attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
++ if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) {
++ /*
++ * 128 WQEs needs to be reserved for the HW (8916). Prevent
++ * reporting the max number on legacy devices
++ */
++ attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
++ }
++
++ /* Adjust for max_qp_wqes for variable wqe */
++ if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
++ attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1;
+
+ attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
+ min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index f84521be3bea4a..605562122ecce2 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -931,6 +931,7 @@ struct hns_roce_hem_item {
+ size_t count; /* max ba numbers */
+ int start; /* start buf offset in this hem */
+ int end; /* end buf offset in this hem */
++ bool exist_bt;
+ };
+
+ /* All HEM items are linked in a tree structure */
+@@ -959,6 +960,7 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
+ }
+ }
+
++ hem->exist_bt = exist_bt;
+ hem->count = count;
+ hem->start = start;
+ hem->end = end;
+@@ -969,22 +971,22 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
+ }
+
+ static void hem_list_free_item(struct hns_roce_dev *hr_dev,
+- struct hns_roce_hem_item *hem, bool exist_bt)
++ struct hns_roce_hem_item *hem)
+ {
+- if (exist_bt)
++ if (hem->exist_bt)
+ dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
+ hem->addr, hem->dma_addr);
+ kfree(hem);
+ }
+
+ static void hem_list_free_all(struct hns_roce_dev *hr_dev,
+- struct list_head *head, bool exist_bt)
++ struct list_head *head)
+ {
+ struct hns_roce_hem_item *hem, *temp_hem;
+
+ list_for_each_entry_safe(hem, temp_hem, head, list) {
+ list_del(&hem->list);
+- hem_list_free_item(hr_dev, hem, exist_bt);
++ hem_list_free_item(hr_dev, hem);
+ }
+ }
+
+@@ -1084,6 +1086,10 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
+
+ for (i = 0; i < region_cnt; i++) {
+ r = (struct hns_roce_buf_region *)&regions[i];
++ /* when r->hopnum = 0, the region should not occupy root_ba. */
++ if (!r->hopnum)
++ continue;
++
+ if (r->hopnum > 1) {
+ step = hem_list_calc_ba_range(r->hopnum, 1, unit);
+ if (step > 0)
+@@ -1177,7 +1183,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
+
+ err_exit:
+ for (level = 1; level < hopnum; level++)
+- hem_list_free_all(hr_dev, &temp_list[level], true);
++ hem_list_free_all(hr_dev, &temp_list[level]);
+
+ return ret;
+ }
+@@ -1218,16 +1224,26 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
+ {
+ struct hns_roce_hem_item *hem;
+
++ /* This is on the has_mtt branch, if r->hopnum
++ * is 0, there is no root_ba to reuse for the
++ * region's fake hem, so a dma_alloc request is
++ * necessary here.
++ */
+ hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
+- r->count, false);
++ r->count, !r->hopnum);
+ if (!hem)
+ return -ENOMEM;
+
+- hem_list_assign_bt(hem, cpu_base, phy_base);
++ /* The root_ba can be reused only when r->hopnum > 0. */
++ if (r->hopnum)
++ hem_list_assign_bt(hem, cpu_base, phy_base);
+ list_add(&hem->list, branch_head);
+ list_add(&hem->sibling, leaf_head);
+
+- return r->count;
++ /* If r->hopnum == 0, 0 is returned,
++ * so that the root_bt entry is not occupied.
++ */
++ return r->hopnum ? r->count : 0;
+ }
+
+ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
+@@ -1271,7 +1287,7 @@ setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
+ return -ENOMEM;
+
+ total = 0;
+- for (i = 0; i < region_cnt && total < max_ba_num; i++) {
++ for (i = 0; i < region_cnt && total <= max_ba_num; i++) {
+ r = &regions[i];
+ if (!r->count)
+ continue;
+@@ -1337,9 +1353,9 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
+ region_cnt);
+ if (ret) {
+ for (i = 0; i < region_cnt; i++)
+- hem_list_free_all(hr_dev, &head.branch[i], false);
++ hem_list_free_all(hr_dev, &head.branch[i]);
+
+- hem_list_free_all(hr_dev, &head.root, true);
++ hem_list_free_all(hr_dev, &head.root);
+ }
+
+ return ret;
+@@ -1402,10 +1418,9 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
+
+ for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
+ for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
+- hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
+- j != 0);
++ hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j]);
+
+- hem_list_free_all(hr_dev, &hem_list->root_bt, true);
++ hem_list_free_all(hr_dev, &hem_list->root_bt);
+ INIT_LIST_HEAD(&hem_list->btm_bt);
+ hem_list->root_ba = 0;
+ }
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 697b17cca02e71..0144e7210d05a1 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -468,7 +468,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
+ valid_num_sge = calc_wr_sge_num(wr, &msg_len);
+
+ ret = set_ud_opcode(ud_sq_wqe, wr);
+- if (WARN_ON(ret))
++ if (WARN_ON_ONCE(ret))
+ return ret;
+
+ ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
+@@ -572,7 +572,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
+
+ ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
+- if (WARN_ON(ret))
++ if (WARN_ON_ONCE(ret))
+ return ret;
+
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO,
+@@ -670,6 +670,10 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ #define HNS_ROCE_SL_SHIFT 2
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
+
++ if (unlikely(qp->state == IB_QPS_ERR)) {
++ flush_cqe(hr_dev, qp);
++ return;
++ }
+ /* All kinds of DirectWQE have the same header field layout */
+ hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
+@@ -5619,6 +5623,9 @@ static void put_dip_ctx_idx(struct hns_roce_dev *hr_dev,
+ {
+ struct hns_roce_dip *hr_dip = hr_qp->dip;
+
++ if (!hr_dip)
++ return;
++
+ xa_lock(&hr_dev->qp_table.dip_xa);
+
+ hr_dip->qp_cnt--;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index bf30b3a65a9ba9..55b9283bfc6f03 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -814,11 +814,6 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
+ mapped_cnt < page_cnt; i++) {
+ r = &mtr->hem_cfg.region[i];
+- /* if hopnum is 0, no need to map pages in this region */
+- if (!r->hopnum) {
+- mapped_cnt += r->count;
+- continue;
+- }
+
+ if (r->offset + r->count > page_cnt) {
+ ret = -EINVAL;
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index ac20ab3bbabf47..8c47cb4edd0a0a 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -2831,7 +2831,7 @@ static int mlx5_ib_get_plane_num(struct mlx5_core_dev *mdev, u8 *num_plane)
+ int err;
+
+ *num_plane = 0;
+- if (!MLX5_CAP_GEN(mdev, ib_virt))
++ if (!MLX5_CAP_GEN(mdev, ib_virt) || !MLX5_CAP_GEN_2(mdev, multiplane))
+ return 0;
+
+ err = mlx5_query_hca_vport_context(mdev, 0, 1, 0, &vport_ctx);
+@@ -3631,7 +3631,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
+ list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
+ list) {
+ if (dev->sys_image_guid == mpi->sys_image_guid &&
+- (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
++ (mlx5_core_native_port_num(mpi->mdev) - 1) == i &&
++ mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) {
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
+ }
+
+@@ -4776,7 +4777,8 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
+- if (dev->sys_image_guid == mpi->sys_image_guid)
++ if (dev->sys_image_guid == mpi->sys_image_guid &&
++ mlx5_core_same_coredev_type(dev->mdev, mpi->mdev))
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
+
+ if (bound) {
+diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
+index 255677bc12b2ab..1ba4a0c8726aed 100644
+--- a/drivers/infiniband/sw/rxe/rxe.c
++++ b/drivers/infiniband/sw/rxe/rxe.c
+@@ -40,6 +40,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
+ /* initialize rxe device parameters */
+ static void rxe_init_device_param(struct rxe_dev *rxe)
+ {
++ struct net_device *ndev;
++
+ rxe->max_inline_data = RXE_MAX_INLINE_DATA;
+
+ rxe->attr.vendor_id = RXE_VENDOR_ID;
+@@ -71,8 +73,15 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
+ rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
+ rxe->attr.max_pkeys = RXE_MAX_PKEYS;
+ rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
++
++ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
++ if (!ndev)
++ return;
++
+ addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
+- rxe->ndev->dev_addr);
++ ndev->dev_addr);
++
++ dev_put(ndev);
+
+ rxe->max_ucontext = RXE_MAX_UCONTEXT;
+ }
+@@ -109,10 +118,15 @@ static void rxe_init_port_param(struct rxe_port *port)
+ static void rxe_init_ports(struct rxe_dev *rxe)
+ {
+ struct rxe_port *port = &rxe->port;
++ struct net_device *ndev;
+
+ rxe_init_port_param(port);
++ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
++ if (!ndev)
++ return;
+ addrconf_addr_eui48((unsigned char *)&port->port_guid,
+- rxe->ndev->dev_addr);
++ ndev->dev_addr);
++ dev_put(ndev);
+ spin_lock_init(&port->port_lock);
+ }
+
+@@ -167,12 +181,13 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
+ /* called by ifc layer to create new rxe device.
+ * The caller should allocate memory for rxe by calling ib_alloc_device.
+ */
+-int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
++int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
++ struct net_device *ndev)
+ {
+ rxe_init(rxe);
+ rxe_set_mtu(rxe, mtu);
+
+- return rxe_register_device(rxe, ibdev_name);
++ return rxe_register_device(rxe, ibdev_name, ndev);
+ }
+
+ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
+diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
+index d8fb2c7af30a7e..fe7f9706673255 100644
+--- a/drivers/infiniband/sw/rxe/rxe.h
++++ b/drivers/infiniband/sw/rxe/rxe.h
+@@ -139,7 +139,8 @@ enum resp_states {
+
+ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
+
+-int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
++int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name,
++ struct net_device *ndev);
+
+ void rxe_rcv(struct sk_buff *skb);
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
+index 86cc2e18a7fdaf..07ff47bae31df9 100644
+--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
++++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
+@@ -31,10 +31,19 @@
+ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
+ {
+ unsigned char ll_addr[ETH_ALEN];
++ struct net_device *ndev;
++ int ret;
++
++ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
++ if (!ndev)
++ return -ENODEV;
+
+ ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
+
+- return dev_mc_add(rxe->ndev, ll_addr);
++ ret = dev_mc_add(ndev, ll_addr);
++ dev_put(ndev);
++
++ return ret;
+ }
+
+ /**
+@@ -47,10 +56,19 @@ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
+ static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
+ {
+ unsigned char ll_addr[ETH_ALEN];
++ struct net_device *ndev;
++ int ret;
++
++ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
++ if (!ndev)
++ return -ENODEV;
+
+ ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
+
+- return dev_mc_del(rxe->ndev, ll_addr);
++ ret = dev_mc_del(ndev, ll_addr);
++ dev_put(ndev);
++
++ return ret;
+ }
+
+ /**
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index 75d1407db52d4d..8cc64ceeb3569b 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -524,7 +524,16 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
+ */
+ const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
+ {
+- return rxe->ndev->name;
++ struct net_device *ndev;
++ char *ndev_name;
++
++ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
++ if (!ndev)
++ return NULL;
++ ndev_name = ndev->name;
++ dev_put(ndev);
++
++ return ndev_name;
+ }
+
+ int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
+@@ -536,10 +545,9 @@ int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
+ if (!rxe)
+ return -ENOMEM;
+
+- rxe->ndev = ndev;
+ ib_mark_name_assigned_by_user(&rxe->ib_dev);
+
+- err = rxe_add(rxe, ndev->mtu, ibdev_name);
++ err = rxe_add(rxe, ndev->mtu, ibdev_name, ndev);
+ if (err) {
+ ib_dealloc_device(&rxe->ib_dev);
+ return err;
+@@ -587,10 +595,18 @@ void rxe_port_down(struct rxe_dev *rxe)
+
+ void rxe_set_port_state(struct rxe_dev *rxe)
+ {
+- if (netif_running(rxe->ndev) && netif_carrier_ok(rxe->ndev))
++ struct net_device *ndev;
++
++ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
++ if (!ndev)
++ return;
++
++ if (netif_running(ndev) && netif_carrier_ok(ndev))
+ rxe_port_up(rxe);
+ else
+ rxe_port_down(rxe);
++
++ dev_put(ndev);
+ }
+
+ static int rxe_notify(struct notifier_block *not_blk,
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
+index 5c18f7e342f294..8a5fc20fd18692 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
+@@ -41,6 +41,7 @@ static int rxe_query_port(struct ib_device *ibdev,
+ u32 port_num, struct ib_port_attr *attr)
+ {
+ struct rxe_dev *rxe = to_rdev(ibdev);
++ struct net_device *ndev;
+ int err, ret;
+
+ if (port_num != 1) {
+@@ -49,6 +50,12 @@ static int rxe_query_port(struct ib_device *ibdev,
+ goto err_out;
+ }
+
++ ndev = rxe_ib_device_get_netdev(ibdev);
++ if (!ndev) {
++ err = -ENODEV;
++ goto err_out;
++ }
++
+ memcpy(attr, &rxe->port.attr, sizeof(*attr));
+
+ mutex_lock(&rxe->usdev_lock);
+@@ -57,13 +64,14 @@ static int rxe_query_port(struct ib_device *ibdev,
+
+ if (attr->state == IB_PORT_ACTIVE)
+ attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+- else if (dev_get_flags(rxe->ndev) & IFF_UP)
++ else if (dev_get_flags(ndev) & IFF_UP)
+ attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
+ else
+ attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+
+ mutex_unlock(&rxe->usdev_lock);
+
++ dev_put(ndev);
+ return ret;
+
+ err_out:
+@@ -1425,9 +1433,16 @@ static const struct attribute_group rxe_attr_group = {
+ static int rxe_enable_driver(struct ib_device *ib_dev)
+ {
+ struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
++ struct net_device *ndev;
++
++ ndev = rxe_ib_device_get_netdev(ib_dev);
++ if (!ndev)
++ return -ENODEV;
+
+ rxe_set_port_state(rxe);
+- dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
++ dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(ndev));
++
++ dev_put(ndev);
+ return 0;
+ }
+
+@@ -1495,7 +1510,8 @@ static const struct ib_device_ops rxe_dev_ops = {
+ INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
+ };
+
+-int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
++int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
++ struct net_device *ndev)
+ {
+ int err;
+ struct ib_device *dev = &rxe->ib_dev;
+@@ -1507,13 +1523,13 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
+ dev->num_comp_vectors = num_possible_cpus();
+ dev->local_dma_lkey = 0;
+ addrconf_addr_eui48((unsigned char *)&dev->node_guid,
+- rxe->ndev->dev_addr);
++ ndev->dev_addr);
+
+ dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
+ BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);
+
+ ib_set_device_ops(dev, &rxe_dev_ops);
+- err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
++ err = ib_device_set_netdev(&rxe->ib_dev, ndev, 1);
+ if (err)
+ return err;
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
+index 3c1354f82283e6..6573ceec0ef583 100644
+--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
++++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
+@@ -370,6 +370,7 @@ struct rxe_port {
+ u32 qp_gsi_index;
+ };
+
++#define RXE_PORT 1
+ struct rxe_dev {
+ struct ib_device ib_dev;
+ struct ib_device_attr attr;
+@@ -377,8 +378,6 @@ struct rxe_dev {
+ int max_inline_data;
+ struct mutex usdev_lock;
+
+- struct net_device *ndev;
+-
+ struct rxe_pool uc_pool;
+ struct rxe_pool pd_pool;
+ struct rxe_pool ah_pool;
+@@ -406,6 +405,11 @@ struct rxe_dev {
+ struct crypto_shash *tfm;
+ };
+
++static inline struct net_device *rxe_ib_device_get_netdev(struct ib_device *dev)
++{
++ return ib_device_get_netdev(dev, RXE_PORT);
++}
++
+ static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
+ {
+ atomic64_inc(&rxe->stats_counters[index]);
+@@ -471,6 +475,7 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
+ return to_rpd(mw->ibmw.pd);
+ }
+
+-int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
++int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name,
++ struct net_device *ndev);
+
+ #endif /* RXE_VERBS_H */
+diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
+index 86d4d6a2170e17..ea5eee50dc39d0 100644
+--- a/drivers/infiniband/sw/siw/siw.h
++++ b/drivers/infiniband/sw/siw/siw.h
+@@ -46,6 +46,9 @@
+ */
+ #define SIW_IRQ_MAXBURST_SQ_ACTIVE 4
+
++/* There is always only a port 1 per siw device */
++#define SIW_PORT 1
++
+ struct siw_dev_cap {
+ int max_qp;
+ int max_qp_wr;
+@@ -69,16 +72,12 @@ struct siw_pd {
+
+ struct siw_device {
+ struct ib_device base_dev;
+- struct net_device *netdev;
+ struct siw_dev_cap attrs;
+
+ u32 vendor_part_id;
+ int numa_node;
+ char raw_gid[ETH_ALEN];
+
+- /* physical port state (only one port per device) */
+- enum ib_port_state state;
+-
+ spinlock_t lock;
+
+ struct xarray qp_xa;
+diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
+index 86323918a570eb..708b13993fdfd3 100644
+--- a/drivers/infiniband/sw/siw/siw_cm.c
++++ b/drivers/infiniband/sw/siw/siw_cm.c
+@@ -1759,6 +1759,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
+ {
+ struct socket *s;
+ struct siw_cep *cep = NULL;
++ struct net_device *ndev = NULL;
+ struct siw_device *sdev = to_siw_dev(id->device);
+ int addr_family = id->local_addr.ss_family;
+ int rv = 0;
+@@ -1779,9 +1780,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
+ struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
+
+ /* For wildcard addr, limit binding to current device only */
+- if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
+- s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
+-
++ if (ipv4_is_zeronet(laddr->sin_addr.s_addr)) {
++ ndev = ib_device_get_netdev(id->device, SIW_PORT);
++ if (ndev) {
++ s->sk->sk_bound_dev_if = ndev->ifindex;
++ } else {
++ rv = -ENODEV;
++ goto error;
++ }
++ }
+ rv = s->ops->bind(s, (struct sockaddr *)laddr,
+ sizeof(struct sockaddr_in));
+ } else {
+@@ -1797,9 +1804,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
+ }
+
+ /* For wildcard addr, limit binding to current device only */
+- if (ipv6_addr_any(&laddr->sin6_addr))
+- s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
+-
++ if (ipv6_addr_any(&laddr->sin6_addr)) {
++ ndev = ib_device_get_netdev(id->device, SIW_PORT);
++ if (ndev) {
++ s->sk->sk_bound_dev_if = ndev->ifindex;
++ } else {
++ rv = -ENODEV;
++ goto error;
++ }
++ }
+ rv = s->ops->bind(s, (struct sockaddr *)laddr,
+ sizeof(struct sockaddr_in6));
+ }
+@@ -1860,6 +1873,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
+ }
+ list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
+ cep->state = SIW_EPSTATE_LISTENING;
++ dev_put(ndev);
+
+ siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);
+
+@@ -1879,6 +1893,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
+ siw_cep_set_free_and_put(cep);
+ }
+ sock_release(s);
++ dev_put(ndev);
+
+ return rv;
+ }
+diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
+index 17abef48abcd22..14d3103aee6f8a 100644
+--- a/drivers/infiniband/sw/siw/siw_main.c
++++ b/drivers/infiniband/sw/siw/siw_main.c
+@@ -287,7 +287,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
+ return NULL;
+
+ base_dev = &sdev->base_dev;
+- sdev->netdev = netdev;
+
+ if (netdev->addr_len) {
+ memcpy(sdev->raw_gid, netdev->dev_addr,
+@@ -381,12 +380,10 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
+
+ switch (event) {
+ case NETDEV_UP:
+- sdev->state = IB_PORT_ACTIVE;
+ siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
+ break;
+
+ case NETDEV_DOWN:
+- sdev->state = IB_PORT_DOWN;
+ siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
+ break;
+
+@@ -407,12 +404,8 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
+ siw_port_event(sdev, 1, IB_EVENT_LID_CHANGE);
+ break;
+ /*
+- * Todo: Below netdev events are currently not handled.
++ * All other events are not handled
+ */
+- case NETDEV_CHANGEMTU:
+- case NETDEV_CHANGE:
+- break;
+-
+ default:
+ break;
+ }
+@@ -442,12 +435,6 @@ static int siw_newlink(const char *basedev_name, struct net_device *netdev)
+ sdev = siw_device_create(netdev);
+ if (sdev) {
+ dev_dbg(&netdev->dev, "siw: new device\n");
+-
+- if (netif_running(netdev) && netif_carrier_ok(netdev))
+- sdev->state = IB_PORT_ACTIVE;
+- else
+- sdev->state = IB_PORT_DOWN;
+-
+ ib_mark_name_assigned_by_user(&sdev->base_dev);
+ rv = siw_device_register(sdev, basedev_name);
+ if (rv)
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index 986666c19378a1..7ca0297d68a4a7 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -171,21 +171,29 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
+ int siw_query_port(struct ib_device *base_dev, u32 port,
+ struct ib_port_attr *attr)
+ {
+- struct siw_device *sdev = to_siw_dev(base_dev);
++ struct net_device *ndev;
+ int rv;
+
+ memset(attr, 0, sizeof(*attr));
+
+ rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
+ &attr->active_width);
++ if (rv)
++ return rv;
++
++ ndev = ib_device_get_netdev(base_dev, SIW_PORT);
++ if (!ndev)
++ return -ENODEV;
++
+ attr->gid_tbl_len = 1;
+ attr->max_msg_sz = -1;
+- attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
+- attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
+- attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
++ attr->max_mtu = ib_mtu_int_to_enum(ndev->max_mtu);
++ attr->active_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
++ attr->phys_state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
+ IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
++ attr->state = attr->phys_state == IB_PORT_PHYS_STATE_LINK_UP ?
++ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
+- attr->state = sdev->state;
+ /*
+ * All zero
+ *
+@@ -199,6 +207,7 @@ int siw_query_port(struct ib_device *base_dev, u32 port,
+ * attr->subnet_timeout = 0;
+ * attr->init_type_repy = 0;
+ */
++ dev_put(ndev);
+ return rv;
+ }
+
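With the cached sdev->state gone, siw_query_port() derives the port state on demand from the live netdev flags, so it can never go stale between notifier events. A hedged sketch of that derivation as a standalone helper (the helper name is illustrative, not from the siw sources):

static enum ib_port_state siw_port_state(const struct net_device *ndev)
{
        /* Active only while the interface is up and has carrier */
        if (netif_running(ndev) && netif_carrier_ok(ndev))
                return IB_PORT_ACTIVE;
        return IB_PORT_DOWN;
}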
+@@ -505,21 +514,24 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+ {
+ struct siw_qp *qp;
+- struct siw_device *sdev;
++ struct net_device *ndev;
+
+- if (base_qp && qp_attr && qp_init_attr) {
++ if (base_qp && qp_attr && qp_init_attr)
+ qp = to_siw_qp(base_qp);
+- sdev = to_siw_dev(base_qp->device);
+- } else {
++ else
+ return -EINVAL;
+- }
++
++ ndev = ib_device_get_netdev(base_qp->device, SIW_PORT);
++ if (!ndev)
++ return -ENODEV;
++
+ qp_attr->qp_state = siw_qp_state_to_ib_qp_state[qp->attrs.state];
+ qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
+ qp_attr->cap.max_send_wr = qp->attrs.sq_size;
+ qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
+ qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
+ qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
+- qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
++ qp_attr->path_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
+ qp_attr->max_rd_atomic = qp->attrs.irq_size;
+ qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
+
+@@ -534,6 +546,7 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
+
+ qp_init_attr->cap = qp_attr->cap;
+
++ dev_put(ndev);
+ return 0;
+ }
+
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index e83d956478521d..ef4abdea3c2d2e 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -349,6 +349,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ struct rtrs_srv_mr *srv_mr;
+ bool need_inval = false;
+ enum ib_send_flags flags;
++ struct ib_sge list;
+ u32 imm;
+ int err;
+
+@@ -401,7 +402,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
+ imm_wr.wr.next = NULL;
+ if (always_invalidate) {
+- struct ib_sge list;
+ struct rtrs_msg_rkey_rsp *msg;
+
+ srv_mr = &srv_path->mrs[id->msg_id];
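+
The one-line move of `struct ib_sge list` to function scope fixes a lifetime bug: the sge was declared inside the always_invalidate block but referenced by the work request posted after that block had ended. A sketch of the bug class (fill_sge() and post_send() are hypothetical stand-ins):

        struct ib_send_wr wr = {};

        if (need_rkey_rsp) {
                struct ib_sge list;     /* BAD: storage ends at the brace */

                fill_sge(&list);
                wr.sg_list = &list;
        }
        post_send(&wr);                 /* wr.sg_list may now dangle */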
+diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
+index 3be7bd8cd8cdeb..32abc2916b40ff 100644
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -64,7 +64,7 @@ static void gic_check_cpu_features(void)
+
+ union gic_base {
+ void __iomem *common_base;
+- void __percpu * __iomem *percpu_base;
++ void __iomem * __percpu *percpu_base;
+ };
+
+ struct gic_chip_data {
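+
The GIC change only reorders the sparse address-space annotations. The corrected declaration reads as a per-CPU pointer whose per-CPU slots each hold a `void __iomem *` MMIO base, which matches how the field is actually used:

        /* percpu_base: a __percpu pointer to (void __iomem *), i.e.
         * each CPU slot holds its own MMIO base pointer.
         */
        void __iomem * __percpu *percpu_base;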
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index e113b99a3eab59..8716004fcf6c90 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -1867,20 +1867,20 @@ static int sdhci_msm_program_key(struct cqhci_host *cq_host,
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ union cqhci_crypto_cap_entry cap;
+
++ if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
++ return qcom_ice_evict_key(msm_host->ice, slot);
++
+ /* Only AES-256-XTS has been tested so far. */
+ cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
+ if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
+ cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256)
+ return -EINVAL;
+
+- if (cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE)
+- return qcom_ice_program_key(msm_host->ice,
+- QCOM_ICE_CRYPTO_ALG_AES_XTS,
+- QCOM_ICE_CRYPTO_KEY_SIZE_256,
+- cfg->crypto_key,
+- cfg->data_unit_size, slot);
+- else
+- return qcom_ice_evict_key(msm_host->ice, slot);
++ return qcom_ice_program_key(msm_host->ice,
++ QCOM_ICE_CRYPTO_ALG_AES_XTS,
++ QCOM_ICE_CRYPTO_KEY_SIZE_256,
++ cfg->crypto_key,
++ cfg->data_unit_size, slot);
+ }
+
+ #else /* CONFIG_MMC_CRYPTO */
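+
The sdhci-msm hunk is a guard-clause refactor with a behavioral fix: key eviction is now handled before the capability check, so evicting a slot no longer fails with -EINVAL when the referenced capability entry does not describe AES-256-XTS. The resulting shape:

        if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
                return qcom_ice_evict_key(msm_host->ice, slot);

        /* The capability check now constrains only key programming */
        cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
        if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
            cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256)
                return -EINVAL;

        return qcom_ice_program_key(msm_host->ice,
                                    QCOM_ICE_CRYPTO_ALG_AES_XTS,
                                    QCOM_ICE_CRYPTO_KEY_SIZE_256,
                                    cfg->crypto_key,
                                    cfg->data_unit_size, slot);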
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index 0ba658a72d8fea..22556d339d6ea5 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -2,7 +2,7 @@
+ /*
+ * Microchip KSZ9477 switch driver main logic
+ *
+- * Copyright (C) 2017-2019 Microchip Technology Inc.
++ * Copyright (C) 2017-2024 Microchip Technology Inc.
+ */
+
+ #include <linux/kernel.h>
+@@ -983,26 +983,51 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
+ int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+ {
+ u32 secs = msecs / 1000;
+- u8 value;
+- u8 data;
++ u8 data, mult, value;
++ u32 max_val;
+ int ret;
+
+- value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
++#define MAX_TIMER_VAL ((1 << 8) - 1)
+
+- ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
+- if (ret < 0)
+- return ret;
++ /* The aging timer comprises a 3-bit multiplier and an 8-bit second
++ * value. Neither of them can be zero. The maximum timer is then
++ * 7 * 255 = 1785 seconds.
++ */
++ if (!secs)
++ secs = 1;
+
+- data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
++ /* Return error if too large. */
++ else if (secs > 7 * MAX_TIMER_VAL)
++ return -EINVAL;
+
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
+ if (ret < 0)
+ return ret;
+
+- value &= ~SW_AGE_CNT_M;
+- value |= FIELD_PREP(SW_AGE_CNT_M, data);
++ /* Check whether there is need to update the multiplier. */
++ mult = FIELD_GET(SW_AGE_CNT_M, value);
++ max_val = MAX_TIMER_VAL;
++ if (mult > 0) {
++ /* Try to use the same multiplier already in the register as
++ * the hardware default uses multiplier 4 and 75 seconds for
++ * 300 seconds.
++ */
++ max_val = DIV_ROUND_UP(secs, mult);
++ if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
++ max_val = MAX_TIMER_VAL;
++ }
++
++ data = DIV_ROUND_UP(secs, max_val);
++ if (mult != data) {
++ value &= ~SW_AGE_CNT_M;
++ value |= FIELD_PREP(SW_AGE_CNT_M, data);
++ ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
++ if (ret < 0)
++ return ret;
++ }
+
+- return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
++ value = DIV_ROUND_UP(secs, data);
++ return ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
+ }
+
+ void ksz9477_port_queue_split(struct ksz_device *dev, int port)
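+
A worked example of the arithmetic above, for an ageing time of 300 seconds with the register multiplier at its hardware default of 4:

        /* secs = 300, mult = 4 (read back via SW_AGE_CNT_M):
         *   max_val = DIV_ROUND_UP(300, 4)  = 75; 75 * 4 == 300, keep 75
         *   data    = DIV_ROUND_UP(300, 75) = 4  -> multiplier unchanged
         *   value   = DIV_ROUND_UP(300, 4)  = 75 -> REG_SW_LUE_CTRL_3
         * reproducing the documented 4 x 75 = 300 second default.
         */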
+diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
+index 04235c22bf40e4..ff579920078ee3 100644
+--- a/drivers/net/dsa/microchip/ksz9477_reg.h
++++ b/drivers/net/dsa/microchip/ksz9477_reg.h
+@@ -2,7 +2,7 @@
+ /*
+ * Microchip KSZ9477 register definitions
+ *
+- * Copyright (C) 2017-2018 Microchip Technology Inc.
++ * Copyright (C) 2017-2024 Microchip Technology Inc.
+ */
+
+ #ifndef __KSZ9477_REGS_H
+@@ -165,8 +165,6 @@
+ #define SW_VLAN_ENABLE BIT(7)
+ #define SW_DROP_INVALID_VID BIT(6)
+ #define SW_AGE_CNT_M GENMASK(5, 3)
+-#define SW_AGE_CNT_S 3
+-#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
+ #define SW_RESV_MCAST_ENABLE BIT(2)
+ #define SW_HASH_OPTION_M 0x03
+ #define SW_HASH_OPTION_CRC 1
+diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
+index 824d9309a3d35e..7fe127a075de31 100644
+--- a/drivers/net/dsa/microchip/lan937x_main.c
++++ b/drivers/net/dsa/microchip/lan937x_main.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Microchip LAN937X switch driver main logic
+- * Copyright (C) 2019-2022 Microchip Technology Inc.
++ * Copyright (C) 2019-2024 Microchip Technology Inc.
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -260,10 +260,66 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
+
+ int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+ {
+- u32 secs = msecs / 1000;
+- u32 value;
++ u8 data, mult, value8;
++ bool in_msec = false;
++ u32 max_val, value;
++ u32 secs = msecs;
+ int ret;
+
++#define MAX_TIMER_VAL ((1 << 20) - 1)
++
++ /* The aging timer comprises a 3-bit multiplier and a 20-bit second
++ * value. Neither of them can be zero. The maximum timer is then
++ * 7 * 1048575 = 7340025 seconds. As this value is too large for
++ * practical use, it can be interpreted as microseconds, making the
++ * maximum timer 7340 seconds with finer control. This allows for a
++ * maximum of 122 minutes, compared to 29 minutes on the KSZ9477
++ * switch.
++ */
++ if (msecs % 1000)
++ in_msec = true;
++ else
++ secs /= 1000;
++ if (!secs)
++ secs = 1;
++
++ /* Return error if too large. */
++ else if (secs > 7 * MAX_TIMER_VAL)
++ return -EINVAL;
++
++ /* Configure how to interpret the number value. */
++ ret = ksz_rmw8(dev, REG_SW_LUE_CTRL_2, SW_AGE_CNT_IN_MICROSEC,
++ in_msec ? SW_AGE_CNT_IN_MICROSEC : 0);
++ if (ret < 0)
++ return ret;
++
++ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value8);
++ if (ret < 0)
++ return ret;
++
++ /* Check whether there is need to update the multiplier. */
++ mult = FIELD_GET(SW_AGE_CNT_M, value8);
++ max_val = MAX_TIMER_VAL;
++ if (mult > 0) {
++ /* Try to keep the multiplier already in the register, as the
++ * hardware default expresses 300 seconds as multiplier 4 times
++ * 75 seconds.
++ */
++ max_val = DIV_ROUND_UP(secs, mult);
++ if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
++ max_val = MAX_TIMER_VAL;
++ }
++
++ data = DIV_ROUND_UP(secs, max_val);
++ if (mult != data) {
++ value8 &= ~SW_AGE_CNT_M;
++ value8 |= FIELD_PREP(SW_AGE_CNT_M, data);
++ ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value8);
++ if (ret < 0)
++ return ret;
++ }
++
++ secs = DIV_ROUND_UP(secs, data);
++
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
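+
For the LAN937x variant the same scheme applies, but with the wider 20-bit counter and an optional sub-second mode. Plugging in the numbers from the comment above:

        /* MAX_TIMER_VAL = (1 << 20) - 1 = 1048575
         * whole-second ceiling: 7 * 1048575 = 7340025 units
         * msecs = 2500   -> 2500 % 1000 != 0, so in_msec is set, secs
         *                   stays 2500 and SW_AGE_CNT_IN_MICROSEC
         *                   selects the finer time base
         * msecs = 300000 -> divides evenly, secs = 300, then handled
         *                   exactly as on the KSZ9477 above
         */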
+diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
+index 2f22a9d01de36b..35269f74a314b4 100644
+--- a/drivers/net/dsa/microchip/lan937x_reg.h
++++ b/drivers/net/dsa/microchip/lan937x_reg.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ /* Microchip LAN937X switch register definitions
+- * Copyright (C) 2019-2021 Microchip Technology Inc.
++ * Copyright (C) 2019-2024 Microchip Technology Inc.
+ */
+ #ifndef __LAN937X_REG_H
+ #define __LAN937X_REG_H
+@@ -52,8 +52,7 @@
+
+ #define SW_VLAN_ENABLE BIT(7)
+ #define SW_DROP_INVALID_VID BIT(6)
+-#define SW_AGE_CNT_M 0x7
+-#define SW_AGE_CNT_S 3
++#define SW_AGE_CNT_M GENMASK(5, 3)
+ #define SW_RESV_MCAST_ENABLE BIT(2)
+
+ #define REG_SW_LUE_CTRL_1 0x0311
+@@ -66,6 +65,10 @@
+ #define SW_FAST_AGING BIT(1)
+ #define SW_LINK_AUTO_AGING BIT(0)
+
++#define REG_SW_LUE_CTRL_2 0x0312
++
++#define SW_AGE_CNT_IN_MICROSEC BIT(7)
++
+ #define REG_SW_AGE_PERIOD__1 0x0313
+ #define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
+
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 0a68b526e4a821..2b784ced06451f 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -1967,7 +1967,11 @@ static int bcm_sysport_open(struct net_device *dev)
+ unsigned int i;
+ int ret;
+
+- clk_prepare_enable(priv->clk);
++ ret = clk_prepare_enable(priv->clk);
++ if (ret) {
++ netdev_err(dev, "could not enable priv clock\n");
++ return ret;
++ }
+
+ /* Reset UniMAC */
+ umac_reset(priv);
+@@ -2625,7 +2629,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
+ goto err_deregister_notifier;
+ }
+
+- clk_prepare_enable(priv->clk);
++ ret = clk_prepare_enable(priv->clk);
++ if (ret) {
++ dev_err(&pdev->dev, "could not enable priv clock\n");
++ goto err_deregister_netdev;
++ }
+
+ priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
+ dev_info(&pdev->dev,
+@@ -2639,6 +2647,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
+
+ return 0;
+
++err_deregister_netdev:
++ unregister_netdev(dev);
+ err_deregister_notifier:
+ unregister_netdevice_notifier(&priv->netdev_notifier);
+ err_deregister_fixed_link:
+@@ -2808,7 +2818,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
+ if (!netif_running(dev))
+ return 0;
+
+- clk_prepare_enable(priv->clk);
++ ret = clk_prepare_enable(priv->clk);
++ if (ret) {
++ netdev_err(dev, "could not enable priv clock\n");
++ return ret;
++ }
++
+ if (priv->wolopts)
+ clk_disable_unprepare(priv->wol_clk);
+
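All three bcmsysport hunks apply the same rule: clk_prepare_enable() can fail (for example with -EPROBE_DEFER or an error from a parent clock), so its return value must be checked and propagated rather than discarded. Where a driver only needs the clock enabled for its whole lifetime, the devres helper folds the get, enable, and cleanup into one call; a hedged sketch (the clock name here is an assumption, not taken from this hunk):

        /* Acquired already enabled; disabled/unprepared automatically
         * on driver detach.
         */
        clk = devm_clk_get_enabled(&pdev->dev, "sw_sysport");
        if (IS_ERR(clk))
                return PTR_ERR(clk);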
+diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
+index 301fa1ea4f5167..95471cfcff420a 100644
+--- a/drivers/net/ethernet/google/gve/gve.h
++++ b/drivers/net/ethernet/google/gve/gve.h
+@@ -1134,6 +1134,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
+ void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
+ bool gve_tx_poll(struct gve_notify_block *block, int budget);
+ bool gve_xdp_poll(struct gve_notify_block *block, int budget);
++int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
+ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg);
+ void gve_tx_free_rings_gqi(struct gve_priv *priv,
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 661566db68c860..d404819ebc9b3f 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -333,6 +333,14 @@ int gve_napi_poll(struct napi_struct *napi, int budget)
+
+ if (block->rx) {
+ work_done = gve_rx_poll(block, budget);
++
++ /* Poll XSK TX as part of RX NAPI. Set up re-poll based on max of
++ * TX and RX work done.
++ */
++ if (priv->xdp_prog)
++ work_done = max_t(int, work_done,
++ gve_xsk_tx_poll(block, budget));
++
+ reschedule |= work_done == budget;
+ }
+
+@@ -922,11 +930,13 @@ static void gve_init_sync_stats(struct gve_priv *priv)
+ static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *cfg)
+ {
++ int num_xdp_queues = priv->xdp_prog ? priv->rx_cfg.num_queues : 0;
++
+ cfg->qcfg = &priv->tx_cfg;
+ cfg->raw_addressing = !gve_is_qpl(priv);
+ cfg->ring_size = priv->tx_desc_cnt;
+ cfg->start_idx = 0;
+- cfg->num_rings = gve_num_tx_queues(priv);
++ cfg->num_rings = priv->tx_cfg.num_queues + num_xdp_queues;
+ cfg->tx = priv->tx;
+ }
+
+@@ -1623,8 +1633,8 @@ static int gve_xsk_pool_enable(struct net_device *dev,
+ if (err)
+ return err;
+
+- /* If XDP prog is not installed, return */
+- if (!priv->xdp_prog)
++ /* If XDP prog is not installed or interface is down, return. */
++ if (!priv->xdp_prog || !netif_running(dev))
+ return 0;
+
+ rx = &priv->rx[qid];
+@@ -1669,21 +1679,16 @@ static int gve_xsk_pool_disable(struct net_device *dev,
+ if (qid >= priv->rx_cfg.num_queues)
+ return -EINVAL;
+
+- /* If XDP prog is not installed, unmap DMA and return */
+- if (!priv->xdp_prog)
+- goto done;
+-
+- tx_qid = gve_xdp_tx_queue_id(priv, qid);
+- if (!netif_running(dev)) {
+- priv->rx[qid].xsk_pool = NULL;
+- xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
+- priv->tx[tx_qid].xsk_pool = NULL;
++ /* If XDP prog is not installed or interface is down, unmap DMA and
++ * return.
++ */
++ if (!priv->xdp_prog || !netif_running(dev))
+ goto done;
+- }
+
+ napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
+ napi_disable(napi_rx); /* make sure current rx poll is done */
+
++ tx_qid = gve_xdp_tx_queue_id(priv, qid);
+ napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
+ napi_disable(napi_tx); /* make sure current tx poll is done */
+
+@@ -1709,24 +1714,20 @@ static int gve_xsk_pool_disable(struct net_device *dev,
+ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+ {
+ struct gve_priv *priv = netdev_priv(dev);
+- int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
++ struct napi_struct *napi;
++
++ if (!gve_get_napi_enabled(priv))
++ return -ENETDOWN;
+
+ if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
+ return -EINVAL;
+
+- if (flags & XDP_WAKEUP_TX) {
+- struct gve_tx_ring *tx = &priv->tx[tx_queue_id];
+- struct napi_struct *napi =
+- &priv->ntfy_blocks[tx->ntfy_id].napi;
+-
+- if (!napi_if_scheduled_mark_missed(napi)) {
+- /* Call local_bh_enable to trigger SoftIRQ processing */
+- local_bh_disable();
+- napi_schedule(napi);
+- local_bh_enable();
+- }
+-
+- tx->xdp_xsk_wakeup++;
++ napi = &priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_id)].napi;
++ if (!napi_if_scheduled_mark_missed(napi)) {
++ /* Call local_bh_enable to trigger SoftIRQ processing */
++ local_bh_disable();
++ napi_schedule(napi);
++ local_bh_enable();
+ }
+
+ return 0;
+@@ -1837,6 +1838,7 @@ int gve_adjust_queues(struct gve_priv *priv,
+ {
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
++ int num_xdp_queues;
+ int err;
+
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+@@ -1847,6 +1849,10 @@ int gve_adjust_queues(struct gve_priv *priv,
+ rx_alloc_cfg.qcfg = &new_rx_config;
+ tx_alloc_cfg.num_rings = new_tx_config.num_queues;
+
++ /* Add dedicated XDP TX queues if enabled. */
++ num_xdp_queues = priv->xdp_prog ? new_rx_config.num_queues : 0;
++ tx_alloc_cfg.num_rings += num_xdp_queues;
++
+ if (netif_running(priv->dev)) {
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ return err;
+@@ -1891,6 +1897,9 @@ static void gve_turndown(struct gve_priv *priv)
+
+ gve_clear_napi_enabled(priv);
+ gve_clear_report_stats(priv);
++
++ /* Make sure that all traffic is finished processing. */
++ synchronize_net();
+ }
+
+ static void gve_turnup(struct gve_priv *priv)
+diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
+index e7fb7d6d283df1..4350ebd9c2bd9e 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx.c
++++ b/drivers/net/ethernet/google/gve/gve_tx.c
+@@ -206,7 +206,10 @@ void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
+ return;
+
+ gve_remove_napi(priv, ntfy_idx);
+- gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
++ if (tx->q_num < priv->tx_cfg.num_queues)
++ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
++ else
++ gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
+ netdev_tx_reset_queue(tx->netdev_txq);
+ gve_tx_remove_from_block(priv, idx);
+ }
+@@ -834,9 +837,12 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ struct gve_tx_ring *tx;
+ int i, err = 0, qid;
+
+- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
++ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
+ return -EINVAL;
+
++ if (!gve_get_napi_enabled(priv))
++ return -ENETDOWN;
++
+ qid = gve_xdp_tx_queue_id(priv,
+ smp_processor_id() % priv->num_xdp_queues);
+
+@@ -975,33 +981,41 @@ static int gve_xsk_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
+ return sent;
+ }
+
++int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget)
++{
++ struct gve_rx_ring *rx = rx_block->rx;
++ struct gve_priv *priv = rx->gve;
++ struct gve_tx_ring *tx;
++ int sent = 0;
++
++ tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
++ if (tx->xsk_pool) {
++ sent = gve_xsk_tx(priv, tx, budget);
++
++ u64_stats_update_begin(&tx->statss);
++ tx->xdp_xsk_sent += sent;
++ u64_stats_update_end(&tx->statss);
++ if (xsk_uses_need_wakeup(tx->xsk_pool))
++ xsk_set_tx_need_wakeup(tx->xsk_pool);
++ }
++
++ return sent;
++}
++
+ bool gve_xdp_poll(struct gve_notify_block *block, int budget)
+ {
+ struct gve_priv *priv = block->priv;
+ struct gve_tx_ring *tx = block->tx;
+ u32 nic_done;
+- bool repoll;
+ u32 to_do;
+
+ /* Find out how much work there is to be done */
+ nic_done = gve_tx_load_event_counter(priv, tx);
+ to_do = min_t(u32, (nic_done - tx->done), budget);
+ gve_clean_xdp_done(priv, tx, to_do);
+- repoll = nic_done != tx->done;
+-
+- if (tx->xsk_pool) {
+- int sent = gve_xsk_tx(priv, tx, budget);
+-
+- u64_stats_update_begin(&tx->statss);
+- tx->xdp_xsk_sent += sent;
+- u64_stats_update_end(&tx->statss);
+- repoll |= (sent == budget);
+- if (xsk_uses_need_wakeup(tx->xsk_pool))
+- xsk_set_tx_need_wakeup(tx->xsk_pool);
+- }
+
+ /* If we still have work we want to repoll */
+- return repoll;
++ return nic_done != tx->done;
+ }
+
+ bool gve_tx_poll(struct gve_notify_block *block, int budget)
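+
The gve rework moves AF_XDP TX servicing from the XDP-TX NAPI into the RX NAPI, so one poller covers both directions of the XSK queue pair; gve_xsk_wakeup() accordingly schedules that same RX NAPI now. The budget accounting in gve_napi_poll() keys repolling off whichever direction did the most work:

        /* Sketch of the combined accounting in gve_napi_poll() */
        work_done = gve_rx_poll(block, budget);
        if (priv->xdp_prog)
                work_done = max_t(int, work_done,
                                  gve_xsk_tx_poll(block, budget));
        reschedule |= work_done == budget;  /* budget exhausted -> repoll */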
+diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
+index 9e80899546d996..83b9905666e24f 100644
+--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
++++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
+@@ -2708,9 +2708,15 @@ static struct platform_device *port_platdev[3];
+
+ static void mv643xx_eth_shared_of_remove(void)
+ {
++ struct mv643xx_eth_platform_data *pd;
+ int n;
+
+ for (n = 0; n < 3; n++) {
++ if (!port_platdev[n])
++ continue;
++ pd = dev_get_platdata(&port_platdev[n]->dev);
++ if (pd)
++ of_node_put(pd->phy_node);
+ platform_device_del(port_platdev[n]);
+ port_platdev[n] = NULL;
+ }
+@@ -2773,8 +2779,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
+ }
+
+ ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
+- if (!ppdev)
+- return -ENOMEM;
++ if (!ppdev) {
++ ret = -ENOMEM;
++ goto put_err;
++ }
+ ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ppdev->dev.of_node = pnp;
+
+@@ -2796,6 +2804,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
+
+ port_err:
+ platform_device_put(ppdev);
++put_err:
++ of_node_put(ppd.phy_node);
+ return ret;
+ }
+
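Both mv643xx_eth hunks balance the device-tree node reference taken when the port's PHY phandle is parsed. A sketch of the required symmetry (the property name is an assumption, since the parsing site is outside these hunks):

        /* of_parse_phandle() returns the node with an elevated refcount */
        ppd.phy_node = of_parse_phandle(pnp, "phy-handle", 0);

        /* ... on every exit path, including the teardown performed by
         * mv643xx_eth_shared_of_remove(), the reference must be dropped:
         */
        of_node_put(ppd.phy_node);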
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index a7a16eac189134..52d99908d0e9d3 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -130,6 +130,7 @@ static const struct pci_device_id sky2_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+index cc9bcc42003242..6ab02f3fc29123 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+@@ -339,9 +339,13 @@ static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
+ {
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
+ struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
++ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ struct mlx5_macsec_rule_attrs rule_attrs;
+ union mlx5_macsec_rule *macsec_rule;
+
++ if (is_tx && tx_sc->encoding_sa != sa->assoc_num)
++ return 0;
++
+ rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+ rule_attrs.sci = sa->sci;
+ rule_attrs.assoc_num = sa->assoc_num;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index c14bef83d84d0f..62b8a7c1c6b54a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -6510,8 +6510,23 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
+
+ mlx5_core_uplink_netdev_set(mdev, NULL);
+ mlx5e_dcbnl_delete_app(priv);
+- unregister_netdev(priv->netdev);
+- _mlx5e_suspend(adev, false);
++ /* When unloading the driver, the netdev is still registered
++ * if it came from legacy mode. If it came from switchdev mode,
++ * it was already unregistered before changing to the NIC profile.
++ */
++ if (priv->netdev->reg_state == NETREG_REGISTERED) {
++ unregister_netdev(priv->netdev);
++ _mlx5e_suspend(adev, false);
++ } else {
++ struct mlx5_core_dev *pos;
++ int i;
++
++ if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
++ mlx5_sd_for_each_dev(i, mdev, pos)
++ mlx5e_destroy_mdev_resources(pos);
++ else
++ _mlx5e_suspend(adev, true);
++ }
+ /* Avoid cleanup if profile rollback failed. */
+ if (priv->profile)
+ priv->profile->cleanup(priv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 92094bf60d5986..0657d107653577 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -1508,6 +1508,21 @@ mlx5e_vport_uplink_rep_unload(struct mlx5e_rep_priv *rpriv)
+
+ priv = netdev_priv(netdev);
+
++ /* This bit is set when using devlink to change eswitch mode from
++ * switchdev to legacy. As the uplink netdev ifindex must be kept,
++ * we detach the uplink representor profile and attach only the NIC
++ * profile. The netdev is unregistered later, when the NIC auxiliary
++ * driver is unloaded in this case.
++ * We explicitly block a devlink eswitch mode change if any IPSec
++ * rules are offloaded, but cannot block other cases such as driver
++ * unload and devlink reload, so for those we unregister the netdev
++ * before the profile change. This avoids a resource leak: the
++ * offloaded rules would otherwise have no chance to be unoffloaded
++ * before the cleanup triggered by detaching the uplink representor
++ * profile.
++ */
++ if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_SWITCH_LEGACY))
++ unregister_netdev(netdev);
++
+ mlx5e_netdev_attach_nic_profile(priv);
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+index 5a0047bdcb5105..ed977ae75fab89 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+@@ -150,11 +150,11 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
+ unsigned long i;
+ int err;
+
+- xa_for_each(&esw->offloads.vport_reps, i, rep) {
+- rpriv = rep->rep_data[REP_ETH].priv;
+- if (!rpriv || !rpriv->netdev)
++ mlx5_esw_for_each_rep(esw, i, rep) {
++ if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
+ continue;
+
++ rpriv = rep->rep_data[REP_ETH].priv;
+ rhashtable_walk_enter(&rpriv->tc_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while ((flow = rhashtable_walk_next(&iter)) != NULL) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+index f44b4c7ebcfd73..48fd0400ffd4ec 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -716,6 +716,9 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
+ MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
+ (last) - 1)
+
++#define mlx5_esw_for_each_rep(esw, i, rep) \
++ xa_for_each(&((esw)->offloads.vport_reps), i, rep)
++
+ struct mlx5_eswitch *__must_check
+ mlx5_devlink_eswitch_get(struct devlink *devlink);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 8cf61ae8b89d24..3950b1d4b3d8e5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -53,9 +53,6 @@
+ #include "lag/lag.h"
+ #include "en/tc/post_meter.h"
+
+-#define mlx5_esw_for_each_rep(esw, i, rep) \
+- xa_for_each(&((esw)->offloads.vport_reps), i, rep)
+-
+ /* There are two match-all miss flows, one for unicast dst mac and
+ * one for multicast.
+ */
+@@ -3762,6 +3759,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ esw->eswitch_operation_in_progress = true;
+ up_write(&esw->mode_lock);
+
++ if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
++ esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY;
+ mlx5_eswitch_disable_locked(esw);
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
+ if (mlx5_devlink_trap_get_num_active(esw->dev)) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+index 6fa06ba2d34653..f57c84e5128bc7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+@@ -1067,7 +1067,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
+ int inlen, err, eqn;
+ void *cqc, *in;
+ __be64 *pas;
+- int vector;
+ u32 i;
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+@@ -1096,8 +1095,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
+ if (!in)
+ goto err_cqwq;
+
+- vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
+- err = mlx5_comp_eqn_get(mdev, vector, &eqn);
++ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
+ if (err) {
+ kvfree(in);
+ goto err_cqwq;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+index 4b5fd71c897ddb..32d2e61f2b8238 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+@@ -423,8 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
+
+ parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
+ ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
+- 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
+- 0);
++ 0, 0, tun->net, parms.link, tun->fwmark, 0, 0);
+
+ rt = ip_route_output_key(tun->net, &fl4);
+ if (IS_ERR(rt))
+diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c
+index d90206f27161e4..c0603f54cec3ad 100644
+--- a/drivers/net/ethernet/sfc/tc_conntrack.c
++++ b/drivers/net/ethernet/sfc/tc_conntrack.c
+@@ -16,7 +16,7 @@ static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+
+ static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
+- .key_len = offsetof(struct efx_tc_ct_zone, linkage),
++ .key_len = sizeof_field(struct efx_tc_ct_zone, zone),
+ .key_offset = 0,
+ .head_offset = offsetof(struct efx_tc_ct_zone, linkage),
+ };
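+
This sfc fix matters because rhashtable hashes and compares exactly key_len bytes starting at key_offset. With key_len set to offsetof(..., linkage), every field (and any padding) located before the linkage member took part in the lookup, so two entries with the same zone could still compare unequal. A hedged sketch with an illustrative struct layout:

        struct ct_zone_entry {
                u16 zone;               /* the real lookup key */
                /* padding/other state was also hashed before the fix */
                struct rhash_head linkage;
        };

        static const struct rhashtable_params params = {
                .key_len     = sizeof_field(struct ct_zone_entry, zone),
                .key_offset  = offsetof(struct ct_zone_entry, zone),
                .head_offset = offsetof(struct ct_zone_entry, linkage),
        };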
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index ad868e8d195d59..aaf008bdbbcd46 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -405,22 +405,6 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
+ return -ENODEV;
+ }
+
+-/**
+- * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
+- * @pdev: platform_device structure
+- * @plat: driver data platform structure
+- *
+- * Release resources claimed by stmmac_probe_config_dt().
+- */
+-static void stmmac_remove_config_dt(struct platform_device *pdev,
+- struct plat_stmmacenet_data *plat)
+-{
+- clk_disable_unprepare(plat->stmmac_clk);
+- clk_disable_unprepare(plat->pclk);
+- of_node_put(plat->phy_node);
+- of_node_put(plat->mdio_node);
+-}
+-
+ /**
+ * stmmac_probe_config_dt - parse device-tree driver parameters
+ * @pdev: platform_device structure
+@@ -490,8 +474,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
+
+ rc = stmmac_mdio_setup(plat, np, &pdev->dev);
+- if (rc)
+- return ERR_PTR(rc);
++ if (rc) {
++ ret = ERR_PTR(rc);
++ goto error_put_phy;
++ }
+
+ of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
+
+@@ -580,8 +566,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
+ GFP_KERNEL);
+ if (!dma_cfg) {
+- stmmac_remove_config_dt(pdev, plat);
+- return ERR_PTR(-ENOMEM);
++ ret = ERR_PTR(-ENOMEM);
++ goto error_put_mdio;
+ }
+ plat->dma_cfg = dma_cfg;
+
+@@ -609,8 +595,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+
+ rc = stmmac_mtl_setup(pdev, plat);
+ if (rc) {
+- stmmac_remove_config_dt(pdev, plat);
+- return ERR_PTR(rc);
++ ret = ERR_PTR(rc);
++ goto error_put_mdio;
+ }
+
+ /* clock setup */
+@@ -662,6 +648,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ clk_disable_unprepare(plat->pclk);
+ error_pclk_get:
+ clk_disable_unprepare(plat->stmmac_clk);
++error_put_mdio:
++ of_node_put(plat->mdio_node);
++error_put_phy:
++ of_node_put(plat->phy_node);
+
+ return ret;
+ }
+@@ -670,16 +660,17 @@ static void devm_stmmac_remove_config_dt(void *data)
+ {
+ struct plat_stmmacenet_data *plat = data;
+
+- /* Platform data argument is unused */
+- stmmac_remove_config_dt(NULL, plat);
++ clk_disable_unprepare(plat->stmmac_clk);
++ clk_disable_unprepare(plat->pclk);
++ of_node_put(plat->mdio_node);
++ of_node_put(plat->phy_node);
+ }
+
+ /**
+ * devm_stmmac_probe_config_dt
+ * @pdev: platform_device structure
+ * @mac: MAC address to use
+- * Description: Devres variant of stmmac_probe_config_dt(). Does not require
+- * the user to call stmmac_remove_config_dt() at driver detach.
++ * Description: Devres variant of stmmac_probe_config_dt().
+ */
+ struct plat_stmmacenet_data *
+ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
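+
With stmmac_remove_config_dt() gone, devm_stmmac_remove_config_dt() is the only cleanup path and runs as a devres action. A sketch of how it is expected to be registered inside devm_stmmac_probe_config_dt() (the registration site is outside this hunk, so treat the wiring as an assumption):

        ret = devm_add_action_or_reset(&pdev->dev,
                                       devm_stmmac_remove_config_dt, plat);
        if (ret)
                return ERR_PTR(ret);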
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index ba6db61dd227c4..dfca13b82bdce2 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -3525,7 +3525,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
+ init_completion(&common->tdown_complete);
+ common->tx_ch_num = AM65_CPSW_DEFAULT_TX_CHNS;
+ common->rx_ch_num_flows = AM65_CPSW_DEFAULT_RX_CHN_FLOWS;
+- common->pf_p0_rx_ptype_rrobin = false;
++ common->pf_p0_rx_ptype_rrobin = true;
+ common->default_vlan = 1;
+
+ common->ports = devm_kcalloc(dev, common->port_num,
+diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
+index 5d6d1cf78e93f2..768578c0d9587d 100644
+--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
++++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
+@@ -215,6 +215,9 @@ static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
+ for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
+ IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
++
++ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
++ IEP_CMP_CFG_CMP_EN(cmp), 0);
+ }
+
+ /* enable reset counter on CMP0 event */
+@@ -780,6 +783,11 @@ int icss_iep_exit(struct icss_iep *iep)
+ }
+ icss_iep_disable(iep);
+
++ if (iep->pps_enabled)
++ icss_iep_pps_enable(iep, false);
++ else if (iep->perout_enabled)
++ icss_iep_perout_enable(iep, NULL, false);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(icss_iep_exit);
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
+index fdebeb2f84e00c..74f0f200a89d4f 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
+@@ -855,31 +855,6 @@ irqreturn_t prueth_rx_irq(int irq, void *dev_id)
+ }
+ EXPORT_SYMBOL_GPL(prueth_rx_irq);
+
+-void prueth_emac_stop(struct prueth_emac *emac)
+-{
+- struct prueth *prueth = emac->prueth;
+- int slice;
+-
+- switch (emac->port_id) {
+- case PRUETH_PORT_MII0:
+- slice = ICSS_SLICE0;
+- break;
+- case PRUETH_PORT_MII1:
+- slice = ICSS_SLICE1;
+- break;
+- default:
+- netdev_err(emac->ndev, "invalid port\n");
+- return;
+- }
+-
+- emac->fw_running = 0;
+- if (!emac->is_sr1)
+- rproc_shutdown(prueth->txpru[slice]);
+- rproc_shutdown(prueth->rtu[slice]);
+- rproc_shutdown(prueth->pru[slice]);
+-}
+-EXPORT_SYMBOL_GPL(prueth_emac_stop);
+-
+ void prueth_cleanup_tx_ts(struct prueth_emac *emac)
+ {
+ int i;
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
+index 5d2491c2943a8b..ddfd1c02a88544 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
+@@ -397,7 +397,7 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
+ return 0;
+ }
+
+-static void icssg_init_emac_mode(struct prueth *prueth)
++void icssg_init_emac_mode(struct prueth *prueth)
+ {
+ /* When the device is configured as a bridge and it is being brought
+ * back to emac mode, the host mac address has to be set to 0.
+@@ -406,9 +406,6 @@ static void icssg_init_emac_mode(struct prueth *prueth)
+ int i;
+ u8 mac[ETH_ALEN] = { 0 };
+
+- if (prueth->emacs_initialized)
+- return;
+-
+ /* Set VLAN TABLE address base */
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
+ addr << SMEM_VLAN_OFFSET);
+@@ -423,15 +420,13 @@ static void icssg_init_emac_mode(struct prueth *prueth)
+ /* Clear host MAC address */
+ icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
+ }
++EXPORT_SYMBOL_GPL(icssg_init_emac_mode);
+
+-static void icssg_init_fw_offload_mode(struct prueth *prueth)
++void icssg_init_fw_offload_mode(struct prueth *prueth)
+ {
+ u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
+ int i;
+
+- if (prueth->emacs_initialized)
+- return;
+-
+ /* Set VLAN TABLE address base */
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
+ addr << SMEM_VLAN_OFFSET);
+@@ -448,6 +443,7 @@ static void icssg_init_fw_offload_mode(struct prueth *prueth)
+ icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
+ icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
+ }
++EXPORT_SYMBOL_GPL(icssg_init_fw_offload_mode);
+
+ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
+ {
+@@ -455,11 +451,6 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
+ struct icssg_flow_cfg __iomem *flow_cfg;
+ int ret;
+
+- if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
+- icssg_init_fw_offload_mode(prueth);
+- else
+- icssg_init_emac_mode(prueth);
+-
+ memset_io(config, 0, TAS_GATE_MASK_LIST0);
+ icssg_miig_queues_init(prueth, slice);
+
+@@ -786,3 +777,27 @@ void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
+ writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
+ }
+ EXPORT_SYMBOL_GPL(icssg_set_pvid);
++
++int emac_fdb_flow_id_updated(struct prueth_emac *emac)
++{
++ struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
++ int slice = prueth_emac_slice(emac);
++ struct mgmt_cmd fdb_cmd = { 0 };
++ int ret;
++
++ fdb_cmd.header = ICSSG_FW_MGMT_CMD_HEADER;
++ fdb_cmd.type = ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW;
++ fdb_cmd.seqnum = ++(emac->prueth->icssg_hwcmdseq);
++ fdb_cmd.param = 0;
++
++ fdb_cmd.param |= (slice << 4);
++ fdb_cmd.cmd_args[0] = 0;
++
++ ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
++ if (ret)
++ return ret;
++
++ WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
++ return fdb_cmd_rsp.status == 1 ? 0 : -EINVAL;
++}
++EXPORT_SYMBOL_GPL(emac_fdb_flow_id_updated);
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
+index 92c2deaa306835..c884e9fa099e6f 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
++++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
+@@ -55,6 +55,7 @@ struct icssg_rxq_ctx {
+ #define ICSSG_FW_MGMT_FDB_CMD_TYPE 0x03
+ #define ICSSG_FW_MGMT_CMD_TYPE 0x04
+ #define ICSSG_FW_MGMT_PKT 0x80000000
++#define ICSSG_FW_MGMT_FDB_CMD_TYPE_RX_FLOW 0x05
+
+ struct icssg_r30_cmd {
+ u32 cmd[4];
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index fe2fd1bfc904db..cb11635a8d1209 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -164,11 +164,26 @@ static struct icssg_firmwares icssg_emac_firmwares[] = {
+ }
+ };
+
+-static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
++static int prueth_start(struct rproc *rproc, const char *fw_name)
++{
++ int ret;
++
++ ret = rproc_set_firmware(rproc, fw_name);
++ if (ret)
++ return ret;
++ return rproc_boot(rproc);
++}
++
++static void prueth_shutdown(struct rproc *rproc)
++{
++ rproc_shutdown(rproc);
++}
++
++static int prueth_emac_start(struct prueth *prueth)
+ {
+ struct icssg_firmwares *firmwares;
+ struct device *dev = prueth->dev;
+- int slice, ret;
++ int ret, slice;
+
+ if (prueth->is_switch_mode)
+ firmwares = icssg_switch_firmwares;
+@@ -177,49 +192,126 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+ else
+ firmwares = icssg_emac_firmwares;
+
+- slice = prueth_emac_slice(emac);
+- if (slice < 0) {
+- netdev_err(emac->ndev, "invalid port\n");
+- return -EINVAL;
++ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
++ ret = prueth_start(prueth->pru[slice], firmwares[slice].pru);
++ if (ret) {
++ dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
++ goto unwind_slices;
++ }
++
++ ret = prueth_start(prueth->rtu[slice], firmwares[slice].rtu);
++ if (ret) {
++ dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
++ rproc_shutdown(prueth->pru[slice]);
++ goto unwind_slices;
++ }
++
++ ret = prueth_start(prueth->txpru[slice], firmwares[slice].txpru);
++ if (ret) {
++ dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
++ rproc_shutdown(prueth->rtu[slice]);
++ rproc_shutdown(prueth->pru[slice]);
++ goto unwind_slices;
++ }
+ }
+
+- ret = icssg_config(prueth, emac, slice);
+- if (ret)
+- return ret;
++ return 0;
+
+- ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
+- ret = rproc_boot(prueth->pru[slice]);
+- if (ret) {
+- dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
+- return -EINVAL;
++unwind_slices:
++ while (--slice >= 0) {
++ prueth_shutdown(prueth->txpru[slice]);
++ prueth_shutdown(prueth->rtu[slice]);
++ prueth_shutdown(prueth->pru[slice]);
+ }
+
+- ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
+- ret = rproc_boot(prueth->rtu[slice]);
+- if (ret) {
+- dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
+- goto halt_pru;
++ return ret;
++}
++
++static void prueth_emac_stop(struct prueth *prueth)
++{
++ int slice;
++
++ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
++ prueth_shutdown(prueth->txpru[slice]);
++ prueth_shutdown(prueth->rtu[slice]);
++ prueth_shutdown(prueth->pru[slice]);
+ }
++}
++
++static int prueth_emac_common_start(struct prueth *prueth)
++{
++ struct prueth_emac *emac;
++ int ret = 0;
++ int slice;
++
++ if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
++ return -EINVAL;
++
++ /* clear SMEM and MSMC settings for all slices */
++ memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
++ memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
++
++ icssg_class_default(prueth->miig_rt, ICSS_SLICE0, 0, false);
++ icssg_class_default(prueth->miig_rt, ICSS_SLICE1, 0, false);
++
++ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode)
++ icssg_init_fw_offload_mode(prueth);
++ else
++ icssg_init_emac_mode(prueth);
++
++ for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
++ emac = prueth->emac[slice];
++ if (!emac)
++ continue;
++ ret = icssg_config(prueth, emac, slice);
++ if (ret)
++ goto disable_class;
++ }
++
++ ret = prueth_emac_start(prueth);
++ if (ret)
++ goto disable_class;
+
+- ret = rproc_set_firmware(prueth->txpru[slice], firmwares[slice].txpru);
+- ret = rproc_boot(prueth->txpru[slice]);
++ emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
++ prueth->emac[ICSS_SLICE1];
++ ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
++ emac, IEP_DEFAULT_CYCLE_TIME_NS);
+ if (ret) {
+- dev_err(dev, "failed to boot TX_PRU%d: %d\n", slice, ret);
+- goto halt_rtu;
++ dev_err(prueth->dev, "Failed to initialize IEP module\n");
++ goto stop_pruss;
+ }
+
+- emac->fw_running = 1;
+ return 0;
+
+-halt_rtu:
+- rproc_shutdown(prueth->rtu[slice]);
++stop_pruss:
++ prueth_emac_stop(prueth);
+
+-halt_pru:
+- rproc_shutdown(prueth->pru[slice]);
++disable_class:
++ icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
++ icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
+
+ return ret;
+ }
+
++static int prueth_emac_common_stop(struct prueth *prueth)
++{
++ struct prueth_emac *emac;
++
++ if (!prueth->emac[ICSS_SLICE0] && !prueth->emac[ICSS_SLICE1])
++ return -EINVAL;
++
++ icssg_class_disable(prueth->miig_rt, ICSS_SLICE0);
++ icssg_class_disable(prueth->miig_rt, ICSS_SLICE1);
++
++ prueth_emac_stop(prueth);
++
++ emac = prueth->emac[ICSS_SLICE0] ? prueth->emac[ICSS_SLICE0] :
++ prueth->emac[ICSS_SLICE1];
++ icss_iep_exit(emac->iep);
++
++ return 0;
++}
++
+ /* Called back by the PHY layer if there is a change in link state of a hw port */
+ static void emac_adjust_link(struct net_device *ndev)
+ {
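+
prueth_emac_start() now boots PRU, RTU and TX_PRU for both slices in one pass and unwinds in strict reverse order on failure. The idiom, reduced to its shape (boot_slice() and shutdown_slice() are illustrative wrappers, not driver functions):

        for (slice = 0; slice < PRUETH_NUM_MACS; slice++) {
                ret = boot_slice(prueth, slice);
                if (ret)
                        goto unwind;
        }
        return 0;

unwind:
        while (--slice >= 0)
                shutdown_slice(prueth, slice);  /* reverse order */
        return ret;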
+@@ -374,9 +466,6 @@ static void prueth_iep_settime(void *clockops_data, u64 ns)
+ u32 cycletime;
+ int timeout;
+
+- if (!emac->fw_running)
+- return;
+-
+ sc_descp = emac->prueth->shram.va + TIMESYNC_FW_WC_SETCLOCK_DESC_OFFSET;
+
+ cycletime = IEP_DEFAULT_CYCLE_TIME_NS;
+@@ -543,23 +632,17 @@ static int emac_ndo_open(struct net_device *ndev)
+ {
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret, i, num_data_chn = emac->tx_ch_num;
++ struct icssg_flow_cfg __iomem *flow_cfg;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ struct device *dev = prueth->dev;
+ int max_rx_flows;
+ int rx_flow;
+
+- /* clear SMEM and MSMC settings for all slices */
+- if (!prueth->emacs_initialized) {
+- memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
+- memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
+- }
+-
+ /* set h/w MAC as user might have re-configured */
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
+- icssg_class_default(prueth->miig_rt, slice, 0, false);
+ icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
+
+ /* Notify the stack of the actual queue counts. */
+@@ -597,18 +680,23 @@ static int emac_ndo_open(struct net_device *ndev)
+ goto cleanup_napi;
+ }
+
+- /* reset and start PRU firmware */
+- ret = prueth_emac_start(prueth, emac);
+- if (ret)
+- goto free_rx_irq;
++ if (!prueth->emacs_initialized) {
++ ret = prueth_emac_common_start(prueth);
++ if (ret)
++ goto free_rx_irq;
++ }
+
+- icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
++ flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
++ writew(emac->rx_flow_id_base, &flow_cfg->rx_base_flow);
++ ret = emac_fdb_flow_id_updated(emac);
+
+- if (!prueth->emacs_initialized) {
+- ret = icss_iep_init(emac->iep, &prueth_iep_clockops,
+- emac, IEP_DEFAULT_CYCLE_TIME_NS);
++ if (ret) {
++ netdev_err(ndev, "Failed to update Rx Flow ID %d", ret);
++ goto stop;
+ }
+
++ icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
++
+ ret = request_threaded_irq(emac->tx_ts_irq, NULL, prueth_tx_ts_irq,
+ IRQF_ONESHOT, dev_name(dev), emac);
+ if (ret)
+@@ -653,7 +741,8 @@ static int emac_ndo_open(struct net_device *ndev)
+ free_tx_ts_irq:
+ free_irq(emac->tx_ts_irq, emac);
+ stop:
+- prueth_emac_stop(emac);
++ if (!prueth->emacs_initialized)
++ prueth_emac_common_stop(prueth);
+ free_rx_irq:
+ free_irq(emac->rx_chns.irq[rx_flow], emac);
+ cleanup_napi:
+@@ -689,8 +778,6 @@ static int emac_ndo_stop(struct net_device *ndev)
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
+
+- icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
+-
+ if (emac->prueth->is_hsr_offload_mode)
+ __dev_mc_unsync(ndev, icssg_prueth_hsr_del_mcast);
+ else
+@@ -728,11 +815,9 @@ static int emac_ndo_stop(struct net_device *ndev)
+ /* Destroying the queued work in ndo_stop() */
+ cancel_delayed_work_sync(&emac->stats_work);
+
+- if (prueth->emacs_initialized == 1)
+- icss_iep_exit(emac->iep);
+-
+ /* stop PRUs */
+- prueth_emac_stop(emac);
++ if (prueth->emacs_initialized == 1)
++ prueth_emac_common_stop(prueth);
+
+ free_irq(emac->tx_ts_irq, emac);
+
+@@ -1010,10 +1095,11 @@ static void prueth_offload_fwd_mark_update(struct prueth *prueth)
+ }
+ }
+
+-static void prueth_emac_restart(struct prueth *prueth)
++static int prueth_emac_restart(struct prueth *prueth)
+ {
+ struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
+ struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
++ int ret;
+
+ /* Detach the net_device for both PRUeth ports */
+ if (netif_running(emac0->ndev))
+@@ -1022,36 +1108,46 @@ static void prueth_emac_restart(struct prueth *prueth)
+ netif_device_detach(emac1->ndev);
+
+ /* Disable both PRUeth ports */
+- icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
+- icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
++ ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
++ ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
++ if (ret)
++ return ret;
+
+ /* Stop both pru cores for both PRUeth ports */
+- prueth_emac_stop(emac0);
+- prueth->emacs_initialized--;
+- prueth_emac_stop(emac1);
+- prueth->emacs_initialized--;
++ ret = prueth_emac_common_stop(prueth);
++ if (ret) {
++ dev_err(prueth->dev, "Failed to stop the firmwares");
++ return ret;
++ }
+
+ /* Start both pru cores for both PRUeth ports */
+- prueth_emac_start(prueth, emac0);
+- prueth->emacs_initialized++;
+- prueth_emac_start(prueth, emac1);
+- prueth->emacs_initialized++;
++ ret = prueth_emac_common_start(prueth);
++ if (ret) {
++ dev_err(prueth->dev, "Failed to start the firmwares");
++ return ret;
++ }
+
+ /* Enable forwarding for both PRUeth ports */
+- icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
+- icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
++ ret = icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
++ ret |= icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
+
+ /* Attach net_device for both PRUeth ports */
+ netif_device_attach(emac0->ndev);
+ netif_device_attach(emac1->ndev);
++
++ return ret;
+ }
+
+ static void icssg_change_mode(struct prueth *prueth)
+ {
+ struct prueth_emac *emac;
+- int mac;
++ int mac, ret;
+
+- prueth_emac_restart(prueth);
++ ret = prueth_emac_restart(prueth);
++ if (ret) {
++ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
++ return;
++ }
+
+ for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
+ emac = prueth->emac[mac];
+@@ -1130,13 +1226,18 @@ static void prueth_netdevice_port_unlink(struct net_device *ndev)
+ {
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
++ int ret;
+
+ prueth->br_members &= ~BIT(emac->port_id);
+
+ if (prueth->is_switch_mode) {
+ prueth->is_switch_mode = false;
+ emac->port_vlan = 0;
+- prueth_emac_restart(prueth);
++ ret = prueth_emac_restart(prueth);
++ if (ret) {
++ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
++ return;
++ }
+ }
+
+ prueth_offload_fwd_mark_update(prueth);
+@@ -1185,6 +1286,7 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
+ struct prueth *prueth = emac->prueth;
+ struct prueth_emac *emac0;
+ struct prueth_emac *emac1;
++ int ret;
+
+ emac0 = prueth->emac[PRUETH_MAC0];
+ emac1 = prueth->emac[PRUETH_MAC1];
+@@ -1195,7 +1297,11 @@ static void prueth_hsr_port_unlink(struct net_device *ndev)
+ emac0->port_vlan = 0;
+ emac1->port_vlan = 0;
+ prueth->hsr_dev = NULL;
+- prueth_emac_restart(prueth);
++ ret = prueth_emac_restart(prueth);
++ if (ret) {
++ dev_err(prueth->dev, "Failed to restart the firmwares, aborting the process");
++ return;
++ }
+ netdev_dbg(ndev, "Disabling HSR Offload mode\n");
+ }
+ }
+@@ -1370,13 +1476,10 @@ static int prueth_probe(struct platform_device *pdev)
+ prueth->pa_stats = NULL;
+ }
+
+- if (eth0_node) {
++ if (eth0_node || eth1_node) {
+ ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
+ if (ret)
+ goto put_cores;
+- }
+-
+- if (eth1_node) {
+ ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
+ if (ret)
+ goto put_cores;
+@@ -1575,14 +1678,12 @@ static int prueth_probe(struct platform_device *pdev)
+ pruss_put(prueth->pruss);
+
+ put_cores:
+- if (eth1_node) {
+- prueth_put_cores(prueth, ICSS_SLICE1);
+- of_node_put(eth1_node);
+- }
+-
+- if (eth0_node) {
++ if (eth0_node || eth1_node) {
+ prueth_put_cores(prueth, ICSS_SLICE0);
+ of_node_put(eth0_node);
++
++ prueth_put_cores(prueth, ICSS_SLICE1);
++ of_node_put(eth1_node);
+ }
+
+ return ret;
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+index f5c1d473e9f991..5473315ea20406 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+@@ -140,7 +140,6 @@ struct prueth_rx_chn {
+ /* data for each emac port */
+ struct prueth_emac {
+ bool is_sr1;
+- bool fw_running;
+ struct prueth *prueth;
+ struct net_device *ndev;
+ u8 mac_addr[6];
+@@ -361,6 +360,8 @@ int icssg_set_port_state(struct prueth_emac *emac,
+ enum icssg_port_state_cmd state);
+ void icssg_config_set_speed(struct prueth_emac *emac);
+ void icssg_config_half_duplex(struct prueth_emac *emac);
++void icssg_init_emac_mode(struct prueth *prueth);
++void icssg_init_fw_offload_mode(struct prueth *prueth);
+
+ /* Buffer queue helpers */
+ int icssg_queue_pop(struct prueth *prueth, u8 queue);
+@@ -377,6 +378,7 @@ void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
+ u8 untag_mask, bool add);
+ u16 icssg_get_pvid(struct prueth_emac *emac);
+ void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port);
++int emac_fdb_flow_id_updated(struct prueth_emac *emac);
+ #define prueth_napi_to_tx_chn(pnapi) \
+ container_of(pnapi, struct prueth_tx_chn, napi_tx)
+
+@@ -407,7 +409,6 @@ void emac_rx_timestamp(struct prueth_emac *emac,
+ struct sk_buff *skb, u32 *psdata);
+ enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+ irqreturn_t prueth_rx_irq(int irq, void *dev_id);
+-void prueth_emac_stop(struct prueth_emac *emac);
+ void prueth_cleanup_tx_ts(struct prueth_emac *emac);
+ int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget);
+ int prueth_prepare_rx_chan(struct prueth_emac *emac,
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+index 292f04d29f4f7b..f88cdc8f012f12 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
+@@ -440,7 +440,6 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+ goto halt_pru;
+ }
+
+- emac->fw_running = 1;
+ return 0;
+
+ halt_pru:
+@@ -449,6 +448,29 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+ return ret;
+ }
+
++static void prueth_emac_stop(struct prueth_emac *emac)
++{
++ struct prueth *prueth = emac->prueth;
++ int slice;
++
++ switch (emac->port_id) {
++ case PRUETH_PORT_MII0:
++ slice = ICSS_SLICE0;
++ break;
++ case PRUETH_PORT_MII1:
++ slice = ICSS_SLICE1;
++ break;
++ default:
++ netdev_err(emac->ndev, "invalid port\n");
++ return;
++ }
++
++ if (!emac->is_sr1)
++ rproc_shutdown(prueth->txpru[slice]);
++ rproc_shutdown(prueth->rtu[slice]);
++ rproc_shutdown(prueth->pru[slice]);
++}
++
+ /**
+ * emac_ndo_open - EMAC device open
+ * @ndev: network adapter device
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 65b0a3115e14cd..64926240b0071d 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -432,10 +432,12 @@ struct kszphy_ptp_priv {
+ struct kszphy_priv {
+ struct kszphy_ptp_priv ptp_priv;
+ const struct kszphy_type *type;
++ struct clk *clk;
+ int led_mode;
+ u16 vct_ctrl1000;
+ bool rmii_ref_clk_sel;
+ bool rmii_ref_clk_sel_val;
++ bool clk_enable;
+ u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
+ };
+
+@@ -2052,6 +2054,46 @@ static void kszphy_get_stats(struct phy_device *phydev,
+ data[i] = kszphy_get_stat(phydev, i);
+ }
+
++static void kszphy_enable_clk(struct phy_device *phydev)
++{
++ struct kszphy_priv *priv = phydev->priv;
++
++ if (!priv->clk_enable && priv->clk) {
++ clk_prepare_enable(priv->clk);
++ priv->clk_enable = true;
++ }
++}
++
++static void kszphy_disable_clk(struct phy_device *phydev)
++{
++ struct kszphy_priv *priv = phydev->priv;
++
++ if (priv->clk_enable && priv->clk) {
++ clk_disable_unprepare(priv->clk);
++ priv->clk_enable = false;
++ }
++}
++
++static int kszphy_generic_resume(struct phy_device *phydev)
++{
++ kszphy_enable_clk(phydev);
++
++ return genphy_resume(phydev);
++}
++
++static int kszphy_generic_suspend(struct phy_device *phydev)
++{
++ int ret;
++
++ ret = genphy_suspend(phydev);
++ if (ret)
++ return ret;
++
++ kszphy_disable_clk(phydev);
++
++ return 0;
++}
++
+ static int kszphy_suspend(struct phy_device *phydev)
+ {
+ /* Disable PHY Interrupts */
+@@ -2061,7 +2103,7 @@ static int kszphy_suspend(struct phy_device *phydev)
+ phydev->drv->config_intr(phydev);
+ }
+
+- return genphy_suspend(phydev);
++ return kszphy_generic_suspend(phydev);
+ }
+
+ static void kszphy_parse_led_mode(struct phy_device *phydev)
+@@ -2092,7 +2134,9 @@ static int kszphy_resume(struct phy_device *phydev)
+ {
+ int ret;
+
+- genphy_resume(phydev);
++ ret = kszphy_generic_resume(phydev);
++ if (ret)
++ return ret;
+
+ /* After switching from power-down to normal mode, an internal global
+ * reset is automatically generated. Wait a minimum of 1 ms before
+@@ -2114,6 +2158,24 @@ static int kszphy_resume(struct phy_device *phydev)
+ return 0;
+ }
+
++/* Because of errata DS80000700A (receiver error following software
++ * power down), the suspend and resume callbacks only disable and
++ * enable the external RMII reference clock.
++ */
++static int ksz8041_resume(struct phy_device *phydev)
++{
++ kszphy_enable_clk(phydev);
++
++ return 0;
++}
++
++static int ksz8041_suspend(struct phy_device *phydev)
++{
++ kszphy_disable_clk(phydev);
++
++ return 0;
++}
++
+ static int ksz9477_resume(struct phy_device *phydev)
+ {
+ int ret;
+@@ -2161,7 +2223,10 @@ static int ksz8061_resume(struct phy_device *phydev)
+ if (!(ret & BMCR_PDOWN))
+ return 0;
+
+- genphy_resume(phydev);
++ ret = kszphy_generic_resume(phydev);
++ if (ret)
++ return ret;
++
+ usleep_range(1000, 2000);
+
+ /* Re-program the value after chip is reset. */
+@@ -2179,6 +2244,11 @@ static int ksz8061_resume(struct phy_device *phydev)
+ return 0;
+ }
+
++static int ksz8061_suspend(struct phy_device *phydev)
++{
++ return kszphy_suspend(phydev);
++}
++
+ static int kszphy_probe(struct phy_device *phydev)
+ {
+ const struct kszphy_type *type = phydev->drv->driver_data;
+@@ -2219,10 +2289,14 @@ static int kszphy_probe(struct phy_device *phydev)
+ } else if (!clk) {
+ /* unnamed clock from the generic ethernet-phy binding */
+ clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, NULL);
+- if (IS_ERR(clk))
+- return PTR_ERR(clk);
+ }
+
++ if (IS_ERR(clk))
++ return PTR_ERR(clk);
++
++ clk_disable_unprepare(clk);
++ priv->clk = clk;
++
+ if (ksz8041_fiber_mode(phydev))
+ phydev->port = PORT_FIBRE;
+
+@@ -5292,6 +5366,21 @@ static int lan8841_probe(struct phy_device *phydev)
+ return 0;
+ }
+
++static int lan8804_resume(struct phy_device *phydev)
++{
++ return kszphy_resume(phydev);
++}
++
++static int lan8804_suspend(struct phy_device *phydev)
++{
++ return kszphy_generic_suspend(phydev);
++}
++
++static int lan8841_resume(struct phy_device *phydev)
++{
++ return kszphy_generic_resume(phydev);
++}
++
+ static int lan8841_suspend(struct phy_device *phydev)
+ {
+ struct kszphy_priv *priv = phydev->priv;
+@@ -5300,7 +5389,7 @@ static int lan8841_suspend(struct phy_device *phydev)
+ if (ptp_priv->ptp_clock)
+ ptp_cancel_worker_sync(ptp_priv->ptp_clock);
+
+- return genphy_suspend(phydev);
++ return kszphy_generic_suspend(phydev);
+ }
+
+ static struct phy_driver ksphy_driver[] = {
+@@ -5360,9 +5449,8 @@ static struct phy_driver ksphy_driver[] = {
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+- /* No suspend/resume callbacks because of errata DS80000700A,
+- * receiver error following software power down.
+- */
++ .suspend = ksz8041_suspend,
++ .resume = ksz8041_resume,
+ }, {
+ .phy_id = PHY_ID_KSZ8041RNLI,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+@@ -5438,7 +5526,7 @@ static struct phy_driver ksphy_driver[] = {
+ .soft_reset = genphy_soft_reset,
+ .config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
+- .suspend = kszphy_suspend,
++ .suspend = ksz8061_suspend,
+ .resume = ksz8061_resume,
+ }, {
+ .phy_id = PHY_ID_KSZ9021,
+@@ -5509,8 +5597,8 @@ static struct phy_driver ksphy_driver[] = {
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+- .suspend = genphy_suspend,
+- .resume = kszphy_resume,
++ .suspend = lan8804_suspend,
++ .resume = lan8804_resume,
+ .config_intr = lan8804_config_intr,
+ .handle_interrupt = lan8804_handle_interrupt,
+ }, {
+@@ -5528,7 +5616,7 @@ static struct phy_driver ksphy_driver[] = {
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = lan8841_suspend,
+- .resume = genphy_resume,
++ .resume = lan8841_resume,
+ .cable_test_start = lan8814_cable_test_start,
+ .cable_test_get_status = ksz886x_cable_test_get_status,
+ }, {
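
The clock handling added above hinges on keeping clk_prepare_enable() and clk_disable_unprepare() strictly paired: the driver records whether the clock is currently on in priv->clk_enable, so a repeated suspend or resume cannot unbalance the common clock framework's prepare/enable counts. A minimal kernel-style sketch of that guard pattern (stand-in names, not the driver's):

    struct dev_priv {
            struct clk *clk;        /* may be NULL: no clock supplied */
            bool clk_enabled;
    };

    static void dev_clk_on(struct dev_priv *p)
    {
            if (p->clk && !p->clk_enabled) {
                    clk_prepare_enable(p->clk);
                    p->clk_enabled = true;
            }
    }

    static void dev_clk_off(struct dev_priv *p)
    {
            if (p->clk && p->clk_enabled) {
                    clk_disable_unprepare(p->clk);
                    p->clk_enabled = false;
            }
    }
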
+diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
+index 5c4e88be46ee33..8797ca1a8a219c 100644
+--- a/drivers/net/pse-pd/tps23881.c
++++ b/drivers/net/pse-pd/tps23881.c
+@@ -64,15 +64,11 @@ static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
+ if (id >= TPS23881_MAX_CHANS)
+ return -ERANGE;
+
+- ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
+- if (ret < 0)
+- return ret;
+-
+ chan = priv->port[id].chan[0];
+ if (chan < 4)
+- val = (u16)(ret | BIT(chan));
++ val = BIT(chan);
+ else
+- val = (u16)(ret | BIT(chan + 4));
++ val = BIT(chan + 4);
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+@@ -100,15 +96,11 @@ static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
+ if (id >= TPS23881_MAX_CHANS)
+ return -ERANGE;
+
+- ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
+- if (ret < 0)
+- return ret;
+-
+ chan = priv->port[id].chan[0];
+ if (chan < 4)
+- val = (u16)(ret | BIT(chan + 4));
++ val = BIT(chan + 4);
+ else
+- val = (u16)(ret | BIT(chan + 8));
++ val = BIT(chan + 8);
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
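
The two hunks above drop the read-modify-write of TPS23881_REG_PW_STATUS and write only the bit for the channel being switched, which suggests the power on/off register acts on whichever bits are set rather than mirroring current state. A generic contrast, assuming hypothetical write-1-to-trigger register semantics and accessor names:

    unsigned int status, chan = 2;                  /* example channel */

    /* Wrong for a write-1-to-trigger register: OR-ing in the status
     * read re-asserts bits for channels that were not targeted.
     */
    status = read_reg(REG_PW_STATUS);               /* hypothetical */
    write_reg(REG_PW_ON, status | (1u << chan));    /* hypothetical */

    /* Correct: act only on the channel being changed. */
    write_reg(REG_PW_ON, 1u << chan);
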
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 0c011d8f5d4db2..9fe7f704a2f7b8 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1365,6 +1365,9 @@ static const struct usb_device_id products[] = {
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c0, 0)}, /* Telit FE910C04 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c4, 0)}, /* Telit FE910C04 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c8, 0)}, /* Telit FE910C04 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+index fa1be8c54d3c1a..c18c6e933f478e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
++++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+@@ -161,6 +161,7 @@ const struct iwl_cfg_trans_params iwl_gl_trans_cfg = {
+
+ const char iwl_bz_name[] = "Intel(R) TBD Bz device";
+ const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
++const char iwl_wh_name[] = "Intel(R) Wi-Fi 7 BE211 320MHz";
+ const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
+ const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+index 34c91deca57b1b..17721bb47e2511 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+@@ -545,6 +545,7 @@ extern const char iwl_ax231_name[];
+ extern const char iwl_ax411_name[];
+ extern const char iwl_bz_name[];
+ extern const char iwl_fm_name[];
++extern const char iwl_wh_name[];
+ extern const char iwl_gl_name[];
+ extern const char iwl_mtp_name[];
+ extern const char iwl_sc_name[];
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+index 1a814eb6743e80..6a4300c01d41d1 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+@@ -2871,6 +2871,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
+ int idx)
+ {
+ int i;
++ int n_channels = 0;
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
+@@ -2879,7 +2880,7 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
+ if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
+- match->channels[match->n_channels++] =
++ match->channels[n_channels++] =
+ mvm->nd_channels[i]->center_freq;
+ } else {
+ struct iwl_scan_offload_profile_match_v1 *matches =
+@@ -2887,9 +2888,11 @@ static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
+ if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
+- match->channels[match->n_channels++] =
++ match->channels[n_channels++] =
+ mvm->nd_channels[i]->center_freq;
+ }
++ /* We may have ended up with fewer channels than we allocated. */
++ match->n_channels = n_channels;
+ }
+
+ /**
+@@ -2970,6 +2973,8 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
+ GFP_KERNEL);
+ if (!net_detect || !n_matches)
+ goto out_report_nd;
++ net_detect->n_matches = n_matches;
++ n_matches = 0;
+
+ for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
+ struct cfg80211_wowlan_nd_match *match;
+@@ -2983,8 +2988,9 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
+ GFP_KERNEL);
+ if (!match)
+ goto out_report_nd;
++ match->n_channels = n_channels;
+
+- net_detect->matches[net_detect->n_matches++] = match;
++ net_detect->matches[n_matches++] = match;
+
+ /* We inverted the order of the SSIDs in the scan
+ * request, so invert the index here.
+@@ -2999,6 +3005,8 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
+
+ iwl_mvm_query_set_freqs(mvm, d3_data->nd_results, match, i);
+ }
++ /* We may have fewer matches than we allocated. */
++ net_detect->n_matches = n_matches;
+
+ out_report_nd:
+ wakeup.net_detect = net_detect;
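
Both hunks above apply the same discipline: count matched entries in a local variable while filling a fixed-size array, and publish the final count only after filling completes, so a reader can never observe n_channels or n_matches larger than the number of initialized slots. A compilable sketch of the pattern:

    struct result {
            int n;
            unsigned int v[32];
    };

    static void fill(struct result *out, const unsigned int *src, int max)
    {
            int n = 0;
            int i;

            for (i = 0; i < max && n < 32; i++)
                    if (src[i])             /* stand-in for the match test */
                            out->v[n++] = src[i];

            out->n = n;                     /* single publication point */
    }
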
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+index 805fb249a0c6a2..8fb2aa28224212 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+@@ -1106,19 +1106,54 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
+ iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
+
+ /* Bz */
+-/* FIXME: need to change the naming according to the actual CRF */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
++ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++ iwl_cfg_bz, iwl_ax201_name),
++
++ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
++ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
++ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++ iwl_cfg_bz, iwl_ax211_name),
++
++ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
++ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_fm_name),
+
++ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
++ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
++ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++ iwl_cfg_bz, iwl_wh_name),
++
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
++ IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++ iwl_cfg_bz, iwl_ax201_name),
++
++ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++ IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
++ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY, IWL_CFG_ANY,
++ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++ iwl_cfg_bz, iwl_ax211_name),
++
++ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++ IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
++ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_fm_name),
+
++ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
++ IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
++ IWL_CFG_RF_TYPE_WH, IWL_CFG_ANY, IWL_CFG_ANY,
++ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
++ iwl_cfg_bz, iwl_wh_name),
++
+ /* Ga (Gl) */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
+index 63eb08c43c0517..6764c13530b9bd 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_mmio.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
+@@ -104,7 +104,7 @@ struct iosm_mmio *ipc_mmio_init(void __iomem *mmio, struct device *dev)
+ break;
+
+ msleep(20);
+- } while (retries-- > 0);
++ } while (--retries > 0);
+
+ if (!retries) {
+ dev_err(ipc_mmio->dev, "invalid exec stage %X", stage);
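
The one-character change above matters because of do/while post-decrement semantics: with `retries-- > 0` the loop body runs one extra time and retries ends at -1, so the `if (!retries)` failure check can never fire. A standalone demonstration (illustrative values, not the driver code):

    #include <stdio.h>

    int main(void)
    {
            int retries = 3;

            do {
                    /* poll stand-in: pretend the device never responds */
            } while (retries-- > 0);        /* 4 iterations, ends at -1 */
            printf("post-decrement: retries == %d\n", retries);

            retries = 3;
            do {
                    /* same stand-in */
            } while (--retries > 0);        /* 3 iterations, ends at 0 */
            printf("pre-decrement:  retries == %d\n", retries);
            return 0;
    }
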
+diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+index 3931c7a13f5ab2..cbdbb91e8381fc 100644
+--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+@@ -104,14 +104,21 @@ void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
+ fsm_state_notify(ctl->md, state);
+ }
+
++static void fsm_release_command(struct kref *ref)
++{
++ struct t7xx_fsm_command *cmd = container_of(ref, typeof(*cmd), refcnt);
++
++ kfree(cmd);
++}
++
+ static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
+ {
+ if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+- *cmd->ret = result;
+- complete_all(cmd->done);
++ cmd->result = result;
++ complete_all(&cmd->done);
+ }
+
+- kfree(cmd);
++ kref_put(&cmd->refcnt, fsm_release_command);
+ }
+
+ static void fsm_del_kf_event(struct t7xx_fsm_event *event)
+@@ -475,7 +482,6 @@ static int fsm_main_thread(void *data)
+
+ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
+ {
+- DECLARE_COMPLETION_ONSTACK(done);
+ struct t7xx_fsm_command *cmd;
+ unsigned long flags;
+ int ret;
+@@ -487,11 +493,13 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id
+ INIT_LIST_HEAD(&cmd->entry);
+ cmd->cmd_id = cmd_id;
+ cmd->flag = flag;
++ kref_init(&cmd->refcnt);
+ if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+- cmd->done = &done;
+- cmd->ret = &ret;
++ init_completion(&cmd->done);
++ kref_get(&cmd->refcnt);
+ }
+
++ kref_get(&cmd->refcnt);
+ spin_lock_irqsave(&ctl->command_lock, flags);
+ list_add_tail(&cmd->entry, &ctl->command_queue);
+ spin_unlock_irqrestore(&ctl->command_lock, flags);
+@@ -501,11 +509,11 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id
+ if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+ unsigned long wait_ret;
+
+- wait_ret = wait_for_completion_timeout(&done,
++ wait_ret = wait_for_completion_timeout(&cmd->done,
+ msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
+- if (!wait_ret)
+- return -ETIMEDOUT;
+
++ ret = wait_ret ? cmd->result : -ETIMEDOUT;
++ kref_put(&cmd->refcnt, fsm_release_command);
+ return ret;
+ }
+
+diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+index 7b0a9baf488c18..6e0601bb752e51 100644
+--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+@@ -110,8 +110,9 @@ struct t7xx_fsm_command {
+ struct list_head entry;
+ enum t7xx_fsm_cmd_state cmd_id;
+ unsigned int flag;
+- struct completion *done;
+- int *ret;
++ struct completion done;
++ int result;
++ struct kref refcnt;
+ };
+
+ struct t7xx_fsm_notifier {
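
The t7xx rework above replaces an on-stack completion, which goes out of scope if the waiter times out while the FSM thread still holds a pointer to it, with a completion and result embedded in the command and protected by a kref: the waiter and the worker each drop only their own reference, and whoever drops the last one frees the command. A compilable userspace sketch of the same lifetime pattern (names are illustrative, not the driver's):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct cmd {
            atomic_int refcnt;
            int result;
            /* a real command would also embed a completion/condvar */
    };

    static struct cmd *cmd_alloc(void)
    {
            struct cmd *c = calloc(1, sizeof(*c));

            if (c)
                    atomic_store(&c->refcnt, 1);    /* submitter's ref */
            return c;
    }

    static void cmd_get(struct cmd *c)
    {
            atomic_fetch_add(&c->refcnt, 1);        /* worker's ref */
    }

    static void cmd_put(struct cmd *c)
    {
            if (atomic_fetch_sub(&c->refcnt, 1) == 1)
                    free(c);        /* last holder frees, whoever it is */
    }

Because neither side ever frees on the other's behalf, a timed-out waiter and a still-running worker can no longer race over freed memory.
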
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 093cb423f536be..61bba5513de05a 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -173,6 +173,11 @@ enum nvme_quirks {
+ * MSI (but not MSI-X) interrupts are broken and never fire.
+ */
+ NVME_QUIRK_BROKEN_MSI = (1 << 21),
++
++ /*
++ * Align dma pool segment size to 512 bytes
++ */
++ NVME_QUIRK_DMAPOOL_ALIGN_512 = (1 << 22),
+ };
+
+ /*
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 55af3dfbc2607b..76b3f7b396c86b 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2690,15 +2690,20 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
+
+ static int nvme_setup_prp_pools(struct nvme_dev *dev)
+ {
++ size_t small_align = 256;
++
+ dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
+ NVME_CTRL_PAGE_SIZE,
+ NVME_CTRL_PAGE_SIZE, 0);
+ if (!dev->prp_page_pool)
+ return -ENOMEM;
+
++ if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
++ small_align = 512;
++
+ /* Optimisation for I/Os between 4k and 128k */
+ dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
+- 256, 256, 0);
++ 256, small_align, 0);
+ if (!dev->prp_small_pool) {
+ dma_pool_destroy(dev->prp_page_pool);
+ return -ENOMEM;
+@@ -3446,7 +3451,7 @@ static const struct pci_device_id nvme_id_table[] = {
+ { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
+- .driver_data = NVME_QUIRK_QDEPTH_ONE },
++ .driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, },
+ { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_BOGUS_NID, },
+diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
+index 685e89b35d330d..cfbab198693b03 100644
+--- a/drivers/nvme/target/configfs.c
++++ b/drivers/nvme/target/configfs.c
+@@ -2227,12 +2227,17 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
+ const char *page, size_t count)
+ {
+ struct list_head *entry;
++ char *old_nqn, *new_nqn;
+ size_t len;
+
+ len = strcspn(page, "\n");
+ if (!len || len > NVMF_NQN_FIELD_LEN - 1)
+ return -EINVAL;
+
++ new_nqn = kstrndup(page, len, GFP_KERNEL);
++ if (!new_nqn)
++ return -ENOMEM;
++
+ down_write(&nvmet_config_sem);
+ list_for_each(entry, &nvmet_subsystems_group.cg_children) {
+ struct config_item *item =
+@@ -2241,13 +2246,15 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
+ if (!strncmp(config_item_name(item), page, len)) {
+ pr_err("duplicate NQN %s\n", config_item_name(item));
+ up_write(&nvmet_config_sem);
++ kfree(new_nqn);
+ return -EINVAL;
+ }
+ }
+- memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
+- memcpy(nvmet_disc_subsys->subsysnqn, page, len);
++ old_nqn = nvmet_disc_subsys->subsysnqn;
++ nvmet_disc_subsys->subsysnqn = new_nqn;
+ up_write(&nvmet_config_sem);
+
++ kfree(old_nqn);
+ return len;
+ }
+
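The configfs change above swaps an in-place memset/memcpy of the discovery NQN, during which a concurrent reader could observe a torn string, for the allocate-outside-lock, swap-under-lock, free-after-unlock pattern. The same shape in compilable userspace C (a pthread mutex standing in for nvmet_config_sem):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
    static char *subsysnqn;                 /* read under cfg_lock */

    static int set_nqn(const char *page, size_t len)
    {
            char *new_nqn = strndup(page, len);     /* allocate unlocked */
            char *old_nqn;

            if (!new_nqn)
                    return -1;

            pthread_mutex_lock(&cfg_lock);
            old_nqn = subsysnqn;
            subsysnqn = new_nqn;    /* readers see old or new, never a mix */
            pthread_mutex_unlock(&cfg_lock);

            free(old_nqn);          /* free after dropping the lock */
            return 0;
    }
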
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
+index 737d0ae3d0b662..f384c72d955452 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
+@@ -86,6 +86,7 @@ const struct regmap_config mcp23x08_regmap = {
+ .num_reg_defaults = ARRAY_SIZE(mcp23x08_defaults),
+ .cache_type = REGCACHE_FLAT,
+ .max_register = MCP_OLAT,
++ .disable_locking = true, /* mcp->lock protects the regmap */
+ };
+ EXPORT_SYMBOL_GPL(mcp23x08_regmap);
+
+@@ -132,6 +133,7 @@ const struct regmap_config mcp23x17_regmap = {
+ .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
+ .cache_type = REGCACHE_FLAT,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
++ .disable_locking = true, /* mcp->lock protects the regmap */
+ };
+ EXPORT_SYMBOL_GPL(mcp23x17_regmap);
+
+@@ -228,7 +230,9 @@ static int mcp_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
++ mutex_lock(&mcp->lock);
+ ret = mcp_read(mcp, MCP_GPPU, &data);
++ mutex_unlock(&mcp->lock);
+ if (ret < 0)
+ return ret;
+ status = (data & BIT(pin)) ? 1 : 0;
+@@ -257,7 +261,9 @@ static int mcp_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
++ mutex_lock(&mcp->lock);
+ ret = mcp_set_bit(mcp, MCP_GPPU, pin, arg);
++ mutex_unlock(&mcp->lock);
+ break;
+ default:
+ dev_dbg(mcp->dev, "Invalid config param %04x\n", param);
+diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
+index 8c05e0dd2a218e..3ba9c43d5516ae 100644
+--- a/drivers/platform/x86/hp/hp-wmi.c
++++ b/drivers/platform/x86/hp/hp-wmi.c
+@@ -64,7 +64,7 @@ static const char * const omen_thermal_profile_boards[] = {
+ "874A", "8603", "8604", "8748", "886B", "886C", "878A", "878B", "878C",
+ "88C8", "88CB", "8786", "8787", "8788", "88D1", "88D2", "88F4", "88FD",
+ "88F5", "88F6", "88F7", "88FE", "88FF", "8900", "8901", "8902", "8912",
+- "8917", "8918", "8949", "894A", "89EB", "8BAD", "8A42"
++ "8917", "8918", "8949", "894A", "89EB", "8BAD", "8A42", "8A15"
+ };
+
+ /* DMI Board names of Omen laptops that are specifically set to be thermal
+@@ -80,7 +80,7 @@ static const char * const omen_thermal_profile_force_v0_boards[] = {
+ * "balanced" when reaching zero.
+ */
+ static const char * const omen_timed_thermal_profile_boards[] = {
+- "8BAD", "8A42"
++ "8BAD", "8A42", "8A15"
+ };
+
+ /* DMI Board names of Victus laptops */
+diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
+index 9d70146fd7420a..1a09f2dfb7bca0 100644
+--- a/drivers/platform/x86/mlx-platform.c
++++ b/drivers/platform/x86/mlx-platform.c
+@@ -6237,6 +6237,7 @@ mlxplat_pci_fpga_device_init(unsigned int device, const char *res_name, struct p
+ fail_pci_request_regions:
+ pci_disable_device(pci_dev);
+ fail_pci_enable_device:
++ pci_dev_put(pci_dev);
+ return err;
+ }
+
+@@ -6247,6 +6248,7 @@ mlxplat_pci_fpga_device_exit(struct pci_dev *pci_bridge,
+ iounmap(pci_bridge_addr);
+ pci_release_regions(pci_bridge);
+ pci_disable_device(pci_bridge);
++ pci_dev_put(pci_bridge);
+ }
+
+ static int
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 6371a9f765c139..2cfb2ac3f465aa 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -184,7 +184,8 @@ enum tpacpi_hkey_event_t {
+ */
+ TP_HKEY_EV_AMT_TOGGLE = 0x131a, /* Toggle AMT on/off */
+ TP_HKEY_EV_DOUBLETAP_TOGGLE = 0x131c, /* Toggle trackpoint doubletap on/off */
+- TP_HKEY_EV_PROFILE_TOGGLE = 0x131f, /* Toggle platform profile */
++ TP_HKEY_EV_PROFILE_TOGGLE = 0x131f, /* Toggle platform profile in 2024 systems */
++ TP_HKEY_EV_PROFILE_TOGGLE2 = 0x1401, /* Toggle platform profile in 2025 + systems */
+
+ /* Reasons for waking up from S3/S4 */
+ TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */
+@@ -11200,6 +11201,7 @@ static bool tpacpi_driver_event(const unsigned int hkey_event)
+ tp_features.trackpoint_doubletap = !tp_features.trackpoint_doubletap;
+ return true;
+ case TP_HKEY_EV_PROFILE_TOGGLE:
++ case TP_HKEY_EV_PROFILE_TOGGLE2:
+ platform_profile_cycle();
+ return true;
+ }
+diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
+index 778ff187ac59e6..88819659df83a2 100644
+--- a/drivers/pmdomain/core.c
++++ b/drivers/pmdomain/core.c
+@@ -2141,6 +2141,11 @@ static int genpd_set_default_power_state(struct generic_pm_domain *genpd)
+ return 0;
+ }
+
++static void genpd_provider_release(struct device *dev)
++{
++ /* nothing to be done here */
++}
++
+ static int genpd_alloc_data(struct generic_pm_domain *genpd)
+ {
+ struct genpd_governor_data *gd = NULL;
+@@ -2172,6 +2177,7 @@ static int genpd_alloc_data(struct generic_pm_domain *genpd)
+
+ genpd->gd = gd;
+ device_initialize(&genpd->dev);
++ genpd->dev.release = genpd_provider_release;
+
+ if (!genpd_is_dev_name_fw(genpd)) {
+ dev_set_name(&genpd->dev, "%s", genpd->name);
+diff --git a/drivers/pmdomain/imx/gpcv2.c b/drivers/pmdomain/imx/gpcv2.c
+index 3f0e6960f47fc2..e03c2cb39a6936 100644
+--- a/drivers/pmdomain/imx/gpcv2.c
++++ b/drivers/pmdomain/imx/gpcv2.c
+@@ -1458,12 +1458,12 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
+ .max_register = SZ_4K,
+ };
+ struct device *dev = &pdev->dev;
+- struct device_node *pgc_np;
++ struct device_node *pgc_np __free(device_node) =
++ of_get_child_by_name(dev->of_node, "pgc");
+ struct regmap *regmap;
+ void __iomem *base;
+ int ret;
+
+- pgc_np = of_get_child_by_name(dev->of_node, "pgc");
+ if (!pgc_np) {
+ dev_err(dev, "No power domains specified in DT\n");
+ return -EINVAL;
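
Initializing pgc_np with __free(device_node) at its declaration ties of_node_put() to the variable's scope via the linux/cleanup.h helpers, so every early return in probe now drops the reference automatically. Roughly, of.h already provides the cleanup definition, along the lines of:

    DEFINE_FREE(device_node, struct device_node *, if (_T) of_node_put(_T))

    /* usage: */
    struct device_node *np __free(device_node) =
            of_get_child_by_name(dev->of_node, "pgc");

    if (!np)
            return -EINVAL;         /* no manual of_node_put() needed */
    /* ... use np; the put runs when np goes out of scope ... */
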
+diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
+index 1755ca026f08ff..73b1edd0531b43 100644
+--- a/drivers/spi/spi-cadence-quadspi.c
++++ b/drivers/spi/spi-cadence-quadspi.c
+@@ -43,6 +43,7 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX);
+ #define CQSPI_SLOW_SRAM BIT(4)
+ #define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5)
+ #define CQSPI_RD_NO_IRQ BIT(6)
++#define CQSPI_DISABLE_STIG_MODE BIT(7)
+
+ /* Capabilities */
+ #define CQSPI_SUPPORTS_OCTAL BIT(0)
+@@ -103,6 +104,7 @@ struct cqspi_st {
+ bool apb_ahb_hazard;
+
+ bool is_jh7110; /* Flag for StarFive JH7110 SoC */
++ bool disable_stig_mode;
+
+ const struct cqspi_driver_platdata *ddata;
+ };
+@@ -1416,7 +1418,8 @@ static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
+ * reads, prefer STIG mode for such small reads.
+ */
+ if (!op->addr.nbytes ||
+- op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX)
++ (op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX &&
++ !cqspi->disable_stig_mode))
+ return cqspi_command_read(f_pdata, op);
+
+ return cqspi_read(f_pdata, op);
+@@ -1880,6 +1883,8 @@ static int cqspi_probe(struct platform_device *pdev)
+ if (ret)
+ goto probe_reset_failed;
+ }
++ if (ddata->quirks & CQSPI_DISABLE_STIG_MODE)
++ cqspi->disable_stig_mode = true;
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "xlnx,versal-ospi-1.0")) {
+@@ -2043,7 +2048,8 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
+ static const struct cqspi_driver_platdata socfpga_qspi = {
+ .quirks = CQSPI_DISABLE_DAC_MODE
+ | CQSPI_NO_SUPPORT_WR_COMPLETION
+- | CQSPI_SLOW_SRAM,
++ | CQSPI_SLOW_SRAM
++ | CQSPI_DISABLE_STIG_MODE,
+ };
+
+ static const struct cqspi_driver_platdata versal_ospi = {
+diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
+index 3d2376caedfa68..5fd0b39d8c703b 100644
+--- a/fs/btrfs/bio.c
++++ b/fs/btrfs/bio.c
+@@ -81,6 +81,9 @@ static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
+
+ bio = bio_split(&orig_bbio->bio, map_length >> SECTOR_SHIFT, GFP_NOFS,
+ &btrfs_clone_bioset);
++ if (IS_ERR(bio))
++ return ERR_CAST(bio);
++
+ bbio = btrfs_bio(bio);
+ btrfs_bio_init(bbio, fs_info, NULL, orig_bbio);
+ bbio->inode = orig_bbio->inode;
+@@ -355,7 +358,7 @@ static void btrfs_simple_end_io(struct bio *bio)
+ INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
+ queue_work(btrfs_end_io_wq(fs_info, bio), &bbio->end_io_work);
+ } else {
+- if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
++ if (bio_is_zone_append(bio) && !bio->bi_status)
+ btrfs_record_physical_zoned(bbio);
+ btrfs_bio_end_io(bbio, bbio->bio.bi_status);
+ }
+@@ -398,7 +401,7 @@ static void btrfs_orig_write_end_io(struct bio *bio)
+ else
+ bio->bi_status = BLK_STS_OK;
+
+- if (bio_op(bio) == REQ_OP_ZONE_APPEND && !bio->bi_status)
++ if (bio_is_zone_append(bio) && !bio->bi_status)
+ stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+
+ btrfs_bio_end_io(bbio, bbio->bio.bi_status);
+@@ -412,7 +415,7 @@ static void btrfs_clone_write_end_io(struct bio *bio)
+ if (bio->bi_status) {
+ atomic_inc(&stripe->bioc->error);
+ btrfs_log_dev_io_error(bio, stripe->dev);
+- } else if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
++ } else if (bio_is_zone_append(bio)) {
+ stripe->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ }
+
+@@ -684,7 +687,8 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+ &bioc, &smap, &mirror_num);
+ if (error) {
+ ret = errno_to_blk_status(error);
+- goto fail;
++ btrfs_bio_counter_dec(fs_info);
++ goto end_bbio;
+ }
+
+ map_length = min(map_length, length);
+@@ -692,7 +696,15 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+ map_length = btrfs_append_map_length(bbio, map_length);
+
+ if (map_length < length) {
+- bbio = btrfs_split_bio(fs_info, bbio, map_length);
++ struct btrfs_bio *split;
++
++ split = btrfs_split_bio(fs_info, bbio, map_length);
++ if (IS_ERR(split)) {
++ ret = errno_to_blk_status(PTR_ERR(split));
++ btrfs_bio_counter_dec(fs_info);
++ goto end_bbio;
++ }
++ bbio = split;
+ bio = &bbio->bio;
+ }
+
+@@ -766,6 +778,7 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
+
+ btrfs_bio_end_io(remaining, ret);
+ }
++end_bbio:
+ btrfs_bio_end_io(bbio, ret);
+ /* Do not submit another chunk */
+ return true;
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 43b7b331b2da36..563f106774e592 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4264,6 +4264,15 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ * already the cleaner, but below we run all pending delayed iputs.
+ */
+ btrfs_flush_workqueue(fs_info->fixup_workers);
++ /*
++ * Similar case here, we have to wait for delalloc workers before we
++ * proceed below and stop the cleaner kthread, otherwise we trigger a
++ * use-after-free on the cleaner kthread task_struct when a delalloc
++ * worker running submit_compressed_extents() adds a delayed iput, which
++ * does a wake up on the cleaner kthread, which was already freed below
++ * when we call kthread_stop().
++ */
++ btrfs_flush_workqueue(fs_info->delalloc_workers);
+
+ /*
+ * After we parked the cleaner kthread, ordered extents may have
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 4b3e256e0d0b88..b5cfb85af937fc 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -10056,6 +10056,11 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ bsi.block_start = physical_block_start;
+ bsi.block_len = len;
+ }
++
++ if (fatal_signal_pending(current)) {
++ ret = -EINTR;
++ goto out;
++ }
+ }
+
+ if (bsi.block_len)
+diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
+index 2b0daced98ebb4..3404e7a30c330c 100644
+--- a/fs/ocfs2/quota_global.c
++++ b/fs/ocfs2/quota_global.c
+@@ -893,7 +893,7 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
+ int status = 0;
+
+ trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
+- if (!sb_has_quota_loaded(sb, type)) {
++ if (!sb_has_quota_active(sb, type)) {
+ status = -ESRCH;
+ goto out;
+ }
+diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
+index 73d3367c533b8a..2956d888c13145 100644
+--- a/fs/ocfs2/quota_local.c
++++ b/fs/ocfs2/quota_local.c
+@@ -867,6 +867,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
+ brelse(oinfo->dqi_libh);
+ brelse(oinfo->dqi_lqi_bh);
+ kfree(oinfo);
++ info->dqi_priv = NULL;
+ return status;
+ }
+
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 7eb010de39fe26..536b7dc4538182 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1810,7 +1810,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ }
+
+ for (; addr != end; addr += PAGE_SIZE, idx++) {
+- unsigned long cur_flags = flags;
++ u64 cur_flags = flags;
+ pagemap_entry_t pme;
+
+ if (folio && (flags & PM_PRESENT) &&
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index bf909c2f6b963b..0ceebde38f9fe0 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -2018,6 +2018,7 @@ exit_cifs(void)
+ destroy_workqueue(decrypt_wq);
+ destroy_workqueue(fileinfo_put_wq);
+ destroy_workqueue(serverclose_wq);
++ destroy_workqueue(cfid_put_wq);
+ destroy_workqueue(cifsiod_wq);
+ cifs_proc_clean();
+ }
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index 7d01dd313351f7..04ffc5b158c3bf 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -4224,6 +4224,7 @@ static bool __query_dir(struct dir_context *ctx, const char *name, int namlen,
+ /* dot and dotdot entries are already reserved */
+ if (!strcmp(".", name) || !strcmp("..", name))
+ return true;
++ d_info->num_scan++;
+ if (ksmbd_share_veto_filename(priv->work->tcon->share_conf, name))
+ return true;
+ if (!match_pattern(name, namlen, priv->search_pattern))
+@@ -4384,8 +4385,17 @@ int smb2_query_dir(struct ksmbd_work *work)
+ query_dir_private.info_level = req->FileInformationClass;
+ dir_fp->readdir_data.private = &query_dir_private;
+ set_ctx_actor(&dir_fp->readdir_data.ctx, __query_dir);
+-
++again:
++ d_info.num_scan = 0;
+ rc = iterate_dir(dir_fp->filp, &dir_fp->readdir_data.ctx);
++ /*
++ * num_entry can be 0 if the directory iteration stops before reaching
++ * the end of the directory and no file matched the search
++ * pattern.
++ */
++ if (rc >= 0 && !d_info.num_entry && d_info.num_scan &&
++ d_info.out_buf_len > 0)
++ goto again;
+ /*
+ * req->OutputBufferLength is too small to contain even one entry.
+ * In this case, it immediately returns OutputBufferLength 0 to client.
+@@ -6006,15 +6016,13 @@ static int set_file_basic_info(struct ksmbd_file *fp,
+ attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
+ }
+
+- attrs.ia_valid |= ATTR_CTIME;
+ if (file_info->ChangeTime)
+- attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
+- else
+- attrs.ia_ctime = inode_get_ctime(inode);
++ inode_set_ctime_to_ts(inode,
++ ksmbd_NTtimeToUnix(file_info->ChangeTime));
+
+ if (file_info->LastWriteTime) {
+ attrs.ia_mtime = ksmbd_NTtimeToUnix(file_info->LastWriteTime);
+- attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
++ attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME);
+ }
+
+ if (file_info->Attributes) {
+@@ -6056,8 +6064,6 @@ static int set_file_basic_info(struct ksmbd_file *fp,
+ return -EACCES;
+
+ inode_lock(inode);
+- inode_set_ctime_to_ts(inode, attrs.ia_ctime);
+- attrs.ia_valid &= ~ATTR_CTIME;
+ rc = notify_change(idmap, dentry, &attrs, NULL);
+ inode_unlock(inode);
+ }
+diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
+index cb76f4b5bafe8c..06903024a2d88b 100644
+--- a/fs/smb/server/vfs.h
++++ b/fs/smb/server/vfs.h
+@@ -43,6 +43,7 @@ struct ksmbd_dir_info {
+ char *rptr;
+ int name_len;
+ int out_buf_len;
++ int num_scan;
+ int num_entry;
+ int data_count;
+ int last_entry_offset;
+diff --git a/include/linux/bio.h b/include/linux/bio.h
+index faceadb040f9ac..66b7620a1b5333 100644
+--- a/include/linux/bio.h
++++ b/include/linux/bio.h
+@@ -677,6 +677,23 @@ static inline void bio_clear_polled(struct bio *bio)
+ bio->bi_opf &= ~REQ_POLLED;
+ }
+
++/**
++ * bio_is_zone_append - is this a zone append bio?
++ * @bio: bio to check
++ *
++ * Check if @bio is a zone append operation. Core block layer code and end_io
++ * handlers must use this instead of an open coded REQ_OP_ZONE_APPEND check
++ * because the block layer can rewrite REQ_OP_ZONE_APPEND to REQ_OP_WRITE if
++ * it is not natively supported.
++ */
++static inline bool bio_is_zone_append(struct bio *bio)
++{
++ if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
++ return false;
++ return bio_op(bio) == REQ_OP_ZONE_APPEND ||
++ bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
++}
++
+ struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
+ unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
+ struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);
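
The kernel-doc above is the crux: when a device lacks native zone append support, the block layer can rewrite REQ_OP_ZONE_APPEND into a REQ_OP_WRITE flagged BIO_EMULATES_ZONE_APPEND, so an open-coded bio_op() comparison misses the emulated case. A hedged sketch of the intended use in an end_io handler (record_written_sector is hypothetical):

    static void my_end_io(struct bio *bio)
    {
            /* true for native zone append and for writes the block
             * layer rewrote to emulate it
             */
            if (bio_is_zone_append(bio) && !bio->bi_status)
                    record_written_sector(bio->bi_iter.bi_sector);

            bio_endio(bio);
    }
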
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index 7d7578a8eac10b..5118caf8aa1c70 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -1121,7 +1121,7 @@ bool bpf_jit_supports_arena(void);
+ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena);
+ u64 bpf_arch_uaddress_limit(void);
+ void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
+-bool bpf_helper_changes_pkt_data(void *func);
++bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id);
+
+ static inline bool bpf_dump_raw_ok(const struct cred *cred)
+ {
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index c1645c86eed969..d65b5d71b93bf8 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -585,13 +585,16 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
+ * vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
+ * @type: first vlan protocol
++ * @mac_offset: MAC offset
+ * @depth: buffer to store length of eth and vlan tags in bytes
+ *
+ * Returns the EtherType of the packet, regardless of whether it is
+ * vlan encapsulated (normal or hardware accelerated) or not.
+ */
+-static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
+- int *depth)
++static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
++ __be16 type,
++ int mac_offset,
++ int *depth)
+ {
+ unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
+
+@@ -610,7 +613,8 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
+ do {
+ struct vlan_hdr vhdr, *vh;
+
+- vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
++ vh = skb_header_pointer(skb, mac_offset + vlan_depth,
++ sizeof(vhdr), &vhdr);
+ if (unlikely(!vh || !--parse_depth))
+ return 0;
+
+@@ -625,6 +629,12 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
+ return type;
+ }
+
++static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
++ int *depth)
++{
++ return __vlan_get_protocol_offset(skb, type, 0, depth);
++}
++
+ /**
+ * vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
+diff --git a/include/linux/memfd.h b/include/linux/memfd.h
+index 3f2cf339ceafd9..d437e30708502e 100644
+--- a/include/linux/memfd.h
++++ b/include/linux/memfd.h
+@@ -7,6 +7,7 @@
+ #ifdef CONFIG_MEMFD_CREATE
+ extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg);
+ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
++unsigned int *memfd_file_seals_ptr(struct file *file);
+ #else
+ static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
+ {
+@@ -16,6 +17,19 @@ static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
+ {
+ return ERR_PTR(-EINVAL);
+ }
++
++static inline unsigned int *memfd_file_seals_ptr(struct file *file)
++{
++ return NULL;
++}
+ #endif
+
++/* Retrieve memfd seals associated with the file, if any. */
++static inline unsigned int memfd_file_seals(struct file *file)
++{
++ unsigned int *sealsp = memfd_file_seals_ptr(file);
++
++ return sealsp ? *sealsp : 0;
++}
++
+ #endif /* __LINUX_MEMFD_H */
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index e23c692a34c702..82c7056e27599e 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -555,6 +555,7 @@ enum {
+ * creation/deletion on drivers rescan. Unset during device attach.
+ */
+ MLX5_PRIV_FLAGS_DETACH = 1 << 2,
++ MLX5_PRIV_FLAGS_SWITCH_LEGACY = 1 << 3,
+ };
+
+ struct mlx5_adev {
+@@ -1233,6 +1234,12 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
+ return dev->coredev_type == MLX5_COREDEV_VF;
+ }
+
++static inline bool mlx5_core_same_coredev_type(const struct mlx5_core_dev *dev1,
++ const struct mlx5_core_dev *dev2)
++{
++ return dev1->coredev_type == dev2->coredev_type;
++}
++
+ static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
+ {
+ return dev->caps.embedded_cpu;
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 96d369112bfa03..512e25c416ae29 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -2113,7 +2113,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
+ u8 migration_in_chunks[0x1];
+ u8 reserved_at_d1[0x1];
+ u8 sf_eq_usage[0x1];
+- u8 reserved_at_d3[0xd];
++ u8 reserved_at_d3[0x5];
++ u8 multiplane[0x1];
++ u8 reserved_at_d9[0x7];
+
+ u8 cross_vhca_object_to_object_supported[0x20];
+
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 61fff5d34ed532..8617adc6becd1f 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -3100,6 +3100,7 @@ static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
+ if (!pmd_ptlock_init(ptdesc))
+ return false;
+ __folio_set_pgtable(folio);
++ ptdesc_pmd_pts_init(ptdesc);
+ lruvec_stat_add_folio(folio, NR_PAGETABLE);
+ return true;
+ }
+@@ -4079,6 +4080,37 @@ void mem_dump_obj(void *object);
+ static inline void mem_dump_obj(void *object) {}
+ #endif
+
++static inline bool is_write_sealed(int seals)
++{
++ return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE);
++}
++
++/**
++ * is_readonly_sealed - Checks whether write-sealed but mapped read-only,
++ *                      in which case writes should be disallowed going
++ *                      forwards.
++ * @seals: the seals to check
++ * @vm_flags: the VMA flags to check
++ *
++ * Returns whether readonly sealed, in which case writes should be disallowed
++ * going forward.
++ */
++static inline bool is_readonly_sealed(int seals, vm_flags_t vm_flags)
++{
++ /*
++ * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
++ * MAP_SHARED and read-only, take care to not allow mprotect to
++ * revert protections on such mappings. Do this only for shared
++ * mappings. For private mappings, don't need to mask
++ * VM_MAYWRITE as we still want them to be COW-writable.
++ */
++ if (is_write_sealed(seals) &&
++ ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_SHARED))
++ return true;
++
++ return false;
++}
++
+ /**
+ * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
+ * handle them.
+@@ -4090,24 +4122,15 @@ static inline void mem_dump_obj(void *object) {}
+ */
+ static inline int seal_check_write(int seals, struct vm_area_struct *vma)
+ {
+- if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
+- /*
+- * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+- * write seals are active.
+- */
+- if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+- return -EPERM;
+-
+- /*
+- * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
+- * MAP_SHARED and read-only, take care to not allow mprotect to
+- * revert protections on such mappings. Do this only for shared
+- * mappings. For private mappings, don't need to mask
+- * VM_MAYWRITE as we still want them to be COW-writable.
+- */
+- if (vma->vm_flags & VM_SHARED)
+- vm_flags_clear(vma, VM_MAYWRITE);
+- }
++ if (!is_write_sealed(seals))
++ return 0;
++
++ /*
++ * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
++ * write seals are active.
++ */
++ if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
++ return -EPERM;
+
+ return 0;
+ }
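
After the refactor, seal_check_write() still rejects new writable shared mappings of write-sealed memfds, while the VM_MAYWRITE masking that used to live here moves behind is_readonly_sealed() so callers can apply it where the VMA flags are composed. A hedged sketch of a caller (hypothetical call site, not part of this patch):

    /* while mapping a write-sealed memfd as MAP_SHARED read-only: */
    if (is_readonly_sealed(seals, vm_flags))
            vm_flags &= ~VM_MAYWRITE;   /* mprotect() cannot upgrade later */
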
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 6e3bdf8e38bcae..6894de506b364f 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -445,6 +445,7 @@ FOLIO_MATCH(compound_head, _head_2a);
+ * @pt_index: Used for s390 gmap.
+ * @pt_mm: Used for x86 pgds.
+ * @pt_frag_refcount: For fragmented page table tracking. Powerpc only.
++ * @pt_share_count: Used for HugeTLB PMD page table share count.
+ * @_pt_pad_2: Padding to ensure proper alignment.
+ * @ptl: Lock for the page table.
+ * @__page_type: Same as page->page_type. Unused for page tables.
+@@ -471,6 +472,9 @@ struct ptdesc {
+ pgoff_t pt_index;
+ struct mm_struct *pt_mm;
+ atomic_t pt_frag_refcount;
++#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
++ atomic_t pt_share_count;
++#endif
+ };
+
+ union {
+@@ -516,6 +520,32 @@ static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
+ const struct page *: (const struct ptdesc *)(p), \
+ struct page *: (struct ptdesc *)(p)))
+
++#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
++static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
++{
++ atomic_set(&ptdesc->pt_share_count, 0);
++}
++
++static inline void ptdesc_pmd_pts_inc(struct ptdesc *ptdesc)
++{
++ atomic_inc(&ptdesc->pt_share_count);
++}
++
++static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc)
++{
++ atomic_dec(&ptdesc->pt_share_count);
++}
++
++static inline int ptdesc_pmd_pts_count(struct ptdesc *ptdesc)
++{
++ return atomic_read(&ptdesc->pt_share_count);
++}
++#else
++static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
++{
++}
++#endif
++
+ /*
+ * Used for sizing the vmemmap region on some architectures
+ */
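
The new pt_share_count gives HugeTLB PMD sharing a dedicated counter instead of overloading the page refcount. The expected pairing, sketched with hypothetical call sites (the actual users land in the hugetlb sharing code, not in this hunk):

    /* share: another mm starts using this PMD page table */
    ptdesc_pmd_pts_inc(virt_to_ptdesc(pmd_table));

    /* unshare: tear the table down only once no sharers remain */
    ptdesc_pmd_pts_dec(virt_to_ptdesc(pmd_table));
    if (!ptdesc_pmd_pts_count(virt_to_ptdesc(pmd_table)))
            free_pmd_table(pmd_table);      /* hypothetical teardown */
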
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index c95f7e6ba25514..ba7b52584770d7 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -804,7 +804,6 @@ struct hci_conn_params {
+ extern struct list_head hci_dev_list;
+ extern struct list_head hci_cb_list;
+ extern rwlock_t hci_dev_list_lock;
+-extern struct mutex hci_cb_list_lock;
+
+ #define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags)
+ #define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags)
+@@ -2007,24 +2006,47 @@ struct hci_cb {
+
+ char *name;
+
++ bool (*match) (struct hci_conn *conn);
+ void (*connect_cfm) (struct hci_conn *conn, __u8 status);
+ void (*disconn_cfm) (struct hci_conn *conn, __u8 status);
+ void (*security_cfm) (struct hci_conn *conn, __u8 status,
+- __u8 encrypt);
++ __u8 encrypt);
+ void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
+ void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
+ };
+
++static inline void hci_cb_lookup(struct hci_conn *conn, struct list_head *list)
++{
++ struct hci_cb *cb, *cpy;
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(cb, &hci_cb_list, list) {
++ if (cb->match && cb->match(conn)) {
++ cpy = kmalloc(sizeof(*cpy), GFP_ATOMIC);
++ if (!cpy)
++ break;
++
++ *cpy = *cb;
++ INIT_LIST_HEAD(&cpy->list);
++ list_add_rcu(&cpy->list, list);
++ }
++ }
++ rcu_read_unlock();
++}
++
+ static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
+ {
+- struct hci_cb *cb;
++ struct list_head list;
++ struct hci_cb *cb, *tmp;
++
++ INIT_LIST_HEAD(&list);
++ hci_cb_lookup(conn, &list);
+
+- mutex_lock(&hci_cb_list_lock);
+- list_for_each_entry(cb, &hci_cb_list, list) {
++ list_for_each_entry_safe(cb, tmp, &list, list) {
+ if (cb->connect_cfm)
+ cb->connect_cfm(conn, status);
++ kfree(cb);
+ }
+- mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->connect_cfm_cb)
+ conn->connect_cfm_cb(conn, status);
+@@ -2032,43 +2054,55 @@ static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
+
+ static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
+ {
+- struct hci_cb *cb;
++ struct list_head list;
++ struct hci_cb *cb, *tmp;
++
++ INIT_LIST_HEAD(&list);
++ hci_cb_lookup(conn, &list);
+
+- mutex_lock(&hci_cb_list_lock);
+- list_for_each_entry(cb, &hci_cb_list, list) {
++ list_for_each_entry_safe(cb, tmp, &list, list) {
+ if (cb->disconn_cfm)
+ cb->disconn_cfm(conn, reason);
++ kfree(cb);
+ }
+- mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->disconn_cfm_cb)
+ conn->disconn_cfm_cb(conn, reason);
+ }
+
+-static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
++static inline void hci_security_cfm(struct hci_conn *conn, __u8 status,
++ __u8 encrypt)
+ {
+- struct hci_cb *cb;
+- __u8 encrypt;
+-
+- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
+- return;
++ struct list_head list;
++ struct hci_cb *cb, *tmp;
+
+- encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
++ INIT_LIST_HEAD(&list);
++ hci_cb_lookup(conn, &list);
+
+- mutex_lock(&hci_cb_list_lock);
+- list_for_each_entry(cb, &hci_cb_list, list) {
++ list_for_each_entry_safe(cb, tmp, &list, list) {
+ if (cb->security_cfm)
+ cb->security_cfm(conn, status, encrypt);
++ kfree(cb);
+ }
+- mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
+ }
+
++static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
++{
++ __u8 encrypt;
++
++ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
++ return;
++
++ encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
++
++ hci_security_cfm(conn, status, encrypt);
++}
++
+ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
+ {
+- struct hci_cb *cb;
+ __u8 encrypt;
+
+ if (conn->state == BT_CONFIG) {
+@@ -2095,40 +2129,38 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
+ conn->sec_level = conn->pending_sec_level;
+ }
+
+- mutex_lock(&hci_cb_list_lock);
+- list_for_each_entry(cb, &hci_cb_list, list) {
+- if (cb->security_cfm)
+- cb->security_cfm(conn, status, encrypt);
+- }
+- mutex_unlock(&hci_cb_list_lock);
+-
+- if (conn->security_cfm_cb)
+- conn->security_cfm_cb(conn, status);
++ hci_security_cfm(conn, status, encrypt);
+ }
+
+ static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
+ {
+- struct hci_cb *cb;
++ struct list_head list;
++ struct hci_cb *cb, *tmp;
++
++ INIT_LIST_HEAD(&list);
++ hci_cb_lookup(conn, &list);
+
+- mutex_lock(&hci_cb_list_lock);
+- list_for_each_entry(cb, &hci_cb_list, list) {
++ list_for_each_entry_safe(cb, tmp, &list, list) {
+ if (cb->key_change_cfm)
+ cb->key_change_cfm(conn, status);
++ kfree(cb);
+ }
+- mutex_unlock(&hci_cb_list_lock);
+ }
+
+ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
+ __u8 role)
+ {
+- struct hci_cb *cb;
++ struct list_head list;
++ struct hci_cb *cb, *tmp;
++
++ INIT_LIST_HEAD(&list);
++ hci_cb_lookup(conn, &list);
+
+- mutex_lock(&hci_cb_list_lock);
+- list_for_each_entry(cb, &hci_cb_list, list) {
++ list_for_each_entry_safe(cb, tmp, &list, list) {
+ if (cb->role_switch_cfm)
+ cb->role_switch_cfm(conn, status, role);
++ kfree(cb);
+ }
+- mutex_unlock(&hci_cb_list_lock);
+ }
+
+ static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 91ae20cb76485b..471c353d32a4a5 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -733,15 +733,18 @@ struct nft_set_ext_tmpl {
+ /**
+ * struct nft_set_ext - set extensions
+ *
+- * @genmask: generation mask
++ * @genmask: generation mask, but also flags (see NFT_SET_ELEM_DEAD_BIT)
+ * @offset: offsets of individual extension types
+ * @data: beginning of extension data
++ *
++ * This structure must be aligned to word size, otherwise atomic bitops
++ * on genmask field can cause alignment failure on some archs.
+ */
+ struct nft_set_ext {
+ u8 genmask;
+ u8 offset[NFT_SET_EXT_NUM];
+ char data[];
+-};
++} __aligned(BITS_PER_LONG / 8);
+
+ static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
+ {
+diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
+index 94e8185c4795fe..3dc7a1551ac350 100644
+--- a/include/sound/cs35l56.h
++++ b/include/sound/cs35l56.h
+@@ -271,12 +271,6 @@ struct cs35l56_base {
+ struct gpio_desc *reset_gpio;
+ };
+
+-/* Temporary to avoid a build break with the HDA driver */
+-static inline int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base)
+-{
+- return 0;
+-}
+-
+ static inline bool cs35l56_is_otp_register(unsigned int reg)
+ {
+ return (reg >> 16) == 3;
+diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
+index d407576ddfb782..eec5eb7de8430e 100644
+--- a/io_uring/kbuf.c
++++ b/io_uring/kbuf.c
+@@ -139,6 +139,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
+ struct io_uring_buf_ring *br = bl->buf_ring;
+ __u16 tail, head = bl->head;
+ struct io_uring_buf *buf;
++ void __user *ret;
+
+ tail = smp_load_acquire(&br->tail);
+ if (unlikely(tail == head))
+@@ -153,6 +154,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
+ req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
+ req->buf_list = bl;
+ req->buf_index = buf->bid;
++ ret = u64_to_user_ptr(buf->addr);
+
+ if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
+ /*
+@@ -168,7 +170,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
+ io_kbuf_commit(req, bl, *len, 1);
+ req->buf_list = NULL;
+ }
+- return u64_to_user_ptr(buf->addr);
++ return ret;
+ }
+
+ void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
+diff --git a/io_uring/net.c b/io_uring/net.c
+index 18507658a921d7..7f549be9abd1e6 100644
+--- a/io_uring/net.c
++++ b/io_uring/net.c
+@@ -748,6 +748,7 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req)
+ if (req->opcode == IORING_OP_RECV) {
+ kmsg->msg.msg_name = NULL;
+ kmsg->msg.msg_namelen = 0;
++ kmsg->msg.msg_inq = 0;
+ kmsg->msg.msg_control = NULL;
+ kmsg->msg.msg_get_inq = 1;
+ kmsg->msg.msg_controllen = 0;
+diff --git a/io_uring/rw.c b/io_uring/rw.c
+index 155938f1009313..39ad25d16ed404 100644
+--- a/io_uring/rw.c
++++ b/io_uring/rw.c
+@@ -979,6 +979,8 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
+ io_kbuf_recycle(req, issue_flags);
+ if (ret < 0)
+ req_set_fail(req);
++ } else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
++ cflags = io_put_kbuf(req, ret, issue_flags);
+ } else {
+ /*
+ * Any successful return value will keep the multishot read
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 233ea78f8f1bd9..2b9c8c168a0ba3 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -539,6 +539,8 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+
+ int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
+ {
++ int err;
++
+ /* Branch offsets can't overflow when program is shrinking, no need
+ * to call bpf_adj_branches(..., true) here
+ */
+@@ -546,7 +548,9 @@ int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
+ sizeof(struct bpf_insn) * (prog->len - off - cnt));
+ prog->len -= cnt;
+
+- return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
++ err = bpf_adj_branches(prog, off, off + cnt, off, false);
++ WARN_ON_ONCE(err);
++ return err;
+ }
+
+ static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+@@ -2936,7 +2940,7 @@ void __weak bpf_jit_compile(struct bpf_prog *prog)
+ {
+ }
+
+-bool __weak bpf_helper_changes_pkt_data(void *func)
++bool __weak bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
+ {
+ return false;
+ }
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 767f1cb8c27e17..a0cab0d0252fab 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -10476,7 +10476,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
+ }
+
+ /* With LD_ABS/IND some JITs save/restore skb from r1. */
+- changes_data = bpf_helper_changes_pkt_data(fn->func);
++ changes_data = bpf_helper_changes_pkt_data(func_id);
+ if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
+ verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
+ func_id_name(func_id), func_id);
+diff --git a/kernel/kcov.c b/kernel/kcov.c
+index 28a6be6e64fdd7..187ba1b80bda16 100644
+--- a/kernel/kcov.c
++++ b/kernel/kcov.c
+@@ -166,7 +166,7 @@ static void kcov_remote_area_put(struct kcov_remote_area *area,
+ * Unlike in_serving_softirq(), this function returns false when called during
+ * a hardirq or an NMI that happened in the softirq context.
+ */
+-static inline bool in_softirq_really(void)
++static __always_inline bool in_softirq_really(void)
+ {
+ return in_serving_softirq() && !in_hardirq() && !in_nmi();
+ }
+diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
+index 79bb18651cdb8b..40f915f893e2ed 100644
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -4367,7 +4367,7 @@ static void scx_ops_bypass(bool bypass)
+ * sees scx_rq_bypassing() before moving tasks to SCX.
+ */
+ if (!scx_enabled()) {
+- rq_unlock_irqrestore(rq, &rf);
++ rq_unlock(rq, &rf);
+ continue;
+ }
+
+@@ -6637,7 +6637,7 @@ __bpf_kfunc int bpf_iter_scx_dsq_new(struct bpf_iter_scx_dsq *it, u64 dsq_id,
+ return -ENOENT;
+
+ INIT_LIST_HEAD(&kit->cursor.node);
+- kit->cursor.flags |= SCX_DSQ_LNODE_ITER_CURSOR | flags;
++ kit->cursor.flags = SCX_DSQ_LNODE_ITER_CURSOR | flags;
+ kit->cursor.priv = READ_ONCE(kit->dsq->seq);
+
+ return 0;
+diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
+index 72bcbfad53db04..c12335499ec91e 100644
+--- a/kernel/trace/fgraph.c
++++ b/kernel/trace/fgraph.c
+@@ -802,7 +802,7 @@ static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
+ #endif
+ {
+ for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) {
+- struct fgraph_ops *gops = fgraph_array[i];
++ struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
+
+ if (gops == &fgraph_stub)
+ continue;
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 3dd3b97d8049ae..cd9dbfb3038330 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -883,16 +883,13 @@ static void profile_graph_return(struct ftrace_graph_ret *trace,
+ }
+
+ static struct fgraph_ops fprofiler_ops = {
+- .ops = {
+- .flags = FTRACE_OPS_FL_INITIALIZED,
+- INIT_OPS_HASH(fprofiler_ops.ops)
+- },
+ .entryfunc = &profile_graph_entry,
+ .retfunc = &profile_graph_return,
+ };
+
+ static int register_ftrace_profiler(void)
+ {
++ ftrace_ops_set_global_filter(&fprofiler_ops.ops);
+ return register_ftrace_graph(&fprofiler_ops);
+ }
+
+@@ -903,12 +900,11 @@ static void unregister_ftrace_profiler(void)
+ #else
+ static struct ftrace_ops ftrace_profile_ops __read_mostly = {
+ .func = function_profile_call,
+- .flags = FTRACE_OPS_FL_INITIALIZED,
+- INIT_OPS_HASH(ftrace_profile_ops)
+ };
+
+ static int register_ftrace_profiler(void)
+ {
++ ftrace_ops_set_global_filter(&ftrace_profile_ops);
+ return register_ftrace_function(&ftrace_profile_ops);
+ }
+
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 7149cd6fd4795e..ea9b44847ce6b7 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -364,6 +364,18 @@ static bool process_string(const char *fmt, int len, struct trace_event_call *ca
+ s = r + 1;
+ } while (s < e);
+
++ /*
++ * Check for arrays. If the argument has: foo[REC->val]
++ * then it is very likely that foo is an array of strings
++ * that are safe to use.
++ */
++ r = strstr(s, "[");
++ if (r && r < e) {
++ r = strstr(r, "REC->");
++ if (r && r < e)
++ return true;
++ }
++
+ /*
+ * If there's any strings in the argument consider this arg OK as it
+ * could be: REC->field ? "foo" : "bar" and we don't want to get into
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 9949ffad8df09d..cee65cb4310816 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3680,23 +3680,27 @@ void workqueue_softirq_dead(unsigned int cpu)
+ * check_flush_dependency - check for flush dependency sanity
+ * @target_wq: workqueue being flushed
+ * @target_work: work item being flushed (NULL for workqueue flushes)
++ * @from_cancel: are we called from the work cancel path
+ *
+ * %current is trying to flush the whole @target_wq or @target_work on it.
+- * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
+- * reclaiming memory or running on a workqueue which doesn't have
+- * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
+- * a deadlock.
++ * If this is not the cancel path (which implies the work being flushed is
++ * either already running, or will not run at all), check whether @target_wq
++ * lacks %WQ_MEM_RECLAIM and, if so, verify that %current is not reclaiming
++ * memory or running on a workqueue without %WQ_MEM_RECLAIM, as that can break
++ * the forward-progress guarantee and lead to a deadlock.
+ */
+ static void check_flush_dependency(struct workqueue_struct *target_wq,
+- struct work_struct *target_work)
++ struct work_struct *target_work,
++ bool from_cancel)
+ {
+- work_func_t target_func = target_work ? target_work->func : NULL;
++ work_func_t target_func;
+ struct worker *worker;
+
+- if (target_wq->flags & WQ_MEM_RECLAIM)
++ if (from_cancel || target_wq->flags & WQ_MEM_RECLAIM)
+ return;
+
+ worker = current_wq_worker();
++ target_func = target_work ? target_work->func : NULL;
+
+ WARN_ONCE(current->flags & PF_MEMALLOC,
+ "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
+@@ -3966,7 +3970,7 @@ void __flush_workqueue(struct workqueue_struct *wq)
+ list_add_tail(&this_flusher.list, &wq->flusher_overflow);
+ }
+
+- check_flush_dependency(wq, NULL);
++ check_flush_dependency(wq, NULL, false);
+
+ mutex_unlock(&wq->mutex);
+
+@@ -4141,7 +4145,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+ }
+
+ wq = pwq->wq;
+- check_flush_dependency(wq, work);
++ check_flush_dependency(wq, work, from_cancel);
+
+ insert_wq_barrier(pwq, barr, work, worker);
+ raw_spin_unlock_irq(&pool->lock);
+@@ -5627,6 +5631,7 @@ static void wq_adjust_max_active(struct workqueue_struct *wq)
+ } while (activated);
+ }
+
++__printf(1, 0)
+ static struct workqueue_struct *__alloc_workqueue(const char *fmt,
+ unsigned int flags,
+ int max_active, va_list args)
+diff --git a/lib/maple_tree.c b/lib/maple_tree.c
+index 8d83e217271967..0cbe913634be4b 100644
+--- a/lib/maple_tree.c
++++ b/lib/maple_tree.c
+@@ -4367,6 +4367,7 @@ int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
+ ret = 1;
+ }
+ if (ret < 0 && range_lo > min) {
++ mas_reset(mas);
+ ret = mas_empty_area(mas, min, range_hi, 1);
+ if (ret == 0)
+ ret = 1;
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index 511c3f61ab44c4..54f4dd8d549f06 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -868,6 +868,11 @@ static int damon_commit_schemes(struct damon_ctx *dst, struct damon_ctx *src)
+ NUMA_NO_NODE);
+ if (!new_scheme)
+ return -ENOMEM;
++ err = damos_commit(new_scheme, src_scheme);
++ if (err) {
++ damon_destroy_scheme(new_scheme);
++ return err;
++ }
+ damon_add_scheme(dst, new_scheme);
+ }
+ return 0;
+@@ -961,8 +966,11 @@ static int damon_commit_targets(
+ return -ENOMEM;
+ err = damon_commit_target(new_target, false,
+ src_target, damon_target_has_pid(src));
+- if (err)
++ if (err) {
++ damon_destroy_target(new_target);
+ return err;
++ }
++ damon_add_target(dst, new_target);
+ }
+ return 0;
+ }
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 5dc57b74a8fe9a..2fa87b9ecec6c7 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -7200,7 +7200,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+ spte = hugetlb_walk(svma, saddr,
+ vma_mmu_pagesize(svma));
+ if (spte) {
+- get_page(virt_to_page(spte));
++ ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
+ break;
+ }
+ }
+@@ -7215,7 +7215,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+ (pmd_t *)((unsigned long)spte & PAGE_MASK));
+ mm_inc_nr_pmds(mm);
+ } else {
+- put_page(virt_to_page(spte));
++ ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
+ }
+ spin_unlock(&mm->page_table_lock);
+ out:
+@@ -7227,10 +7227,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+ /*
+ * unmap huge page backed by shared pte.
+ *
+- * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
+- * indicated by page_count > 1, unmap is achieved by clearing pud and
+- * decrementing the ref count. If count == 1, the pte page is not shared.
+- *
+ * Called with page table lock held.
+ *
+ * returns: 1 successfully unmapped a shared pte page
+@@ -7239,18 +7235,20 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+ {
++ unsigned long sz = huge_page_size(hstate_vma(vma));
+ pgd_t *pgd = pgd_offset(mm, addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ pud_t *pud = pud_offset(p4d, addr);
+
+ i_mmap_assert_write_locked(vma->vm_file->f_mapping);
+ hugetlb_vma_assert_locked(vma);
+- BUG_ON(page_count(virt_to_page(ptep)) == 0);
+- if (page_count(virt_to_page(ptep)) == 1)
++ if (sz != PMD_SIZE)
++ return 0;
++ if (!ptdesc_pmd_pts_count(virt_to_ptdesc(ptep)))
+ return 0;
+
+ pud_clear(pud);
+- put_page(virt_to_page(ptep));
++ ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep));
+ mm_dec_nr_pmds(mm);
+ return 1;
+ }
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 0400f5e8ac60de..74f5f4c51ab8c8 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -373,7 +373,7 @@ static void print_unreferenced(struct seq_file *seq,
+
+ for (i = 0; i < nr_entries; i++) {
+ void *ptr = (void *)entries[i];
+- warn_or_seq_printf(seq, " [<%pK>] %pS\n", ptr, ptr);
++ warn_or_seq_printf(seq, " %pS\n", ptr);
+ }
+ }
+
+diff --git a/mm/memfd.c b/mm/memfd.c
+index c17c3ea701a17e..35a370d75c9ad7 100644
+--- a/mm/memfd.c
++++ b/mm/memfd.c
+@@ -170,7 +170,7 @@ static int memfd_wait_for_pins(struct address_space *mapping)
+ return error;
+ }
+
+-static unsigned int *memfd_file_seals_ptr(struct file *file)
++unsigned int *memfd_file_seals_ptr(struct file *file)
+ {
+ if (shmem_file(file))
+ return &SHMEM_I(file_inode(file))->seals;
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 7fb4c1e97175f9..6183805f6f9e6e 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -47,6 +47,7 @@
+ #include <linux/oom.h>
+ #include <linux/sched/mm.h>
+ #include <linux/ksm.h>
++#include <linux/memfd.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/cacheflush.h>
+@@ -368,6 +369,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
+
+ if (file) {
+ struct inode *inode = file_inode(file);
++ unsigned int seals = memfd_file_seals(file);
+ unsigned long flags_mask;
+
+ if (!file_mmap_ok(file, inode, pgoff, len))
+@@ -408,6 +410,8 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
+ vm_flags |= VM_SHARED | VM_MAYSHARE;
+ if (!(file->f_mode & FMODE_WRITE))
+ vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
++ else if (is_readonly_sealed(seals, vm_flags))
++ vm_flags &= ~VM_MAYWRITE;
+ fallthrough;
+ case MAP_PRIVATE:
+ if (!(file->f_mode & FMODE_READ))
+diff --git a/mm/readahead.c b/mm/readahead.c
+index 99fdb2b5b56862..bf79275060f3be 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -641,7 +641,11 @@ void page_cache_async_ra(struct readahead_control *ractl,
+ 1UL << order);
+ if (index == expected) {
+ ra->start += ra->size;
+- ra->size = get_next_ra_size(ra, max_pages);
++ /*
++ * In the case of MADV_HUGEPAGE, the actual size might exceed
++ * the readahead window.
++ */
++ ra->size = max(ra->size, get_next_ra_size(ra, max_pages));
+ ra->async_size = ra->size;
+ goto readit;
+ }
+diff --git a/mm/shmem.c b/mm/shmem.c
+index b03ced0c3d4858..dd4eb11c84b59e 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1527,7 +1527,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
+ !shmem_falloc->waitq &&
+ index >= shmem_falloc->start &&
+ index < shmem_falloc->next)
+- shmem_falloc->nr_unswapped++;
++ shmem_falloc->nr_unswapped += nr_pages;
+ else
+ shmem_falloc = NULL;
+ spin_unlock(&inode->i_lock);
+@@ -1664,6 +1664,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ unsigned long mask = READ_ONCE(huge_shmem_orders_always);
+ unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
+ unsigned long vm_flags = vma ? vma->vm_flags : 0;
++ pgoff_t aligned_index;
+ bool global_huge;
+ loff_t i_size;
+ int order;
+@@ -1698,9 +1699,9 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
+ /* Allow mTHP that will be fully within i_size. */
+ order = highest_order(within_size_orders);
+ while (within_size_orders) {
+- index = round_up(index + 1, order);
++ aligned_index = round_up(index + 1, 1 << order);
+ i_size = round_up(i_size_read(inode), PAGE_SIZE);
+- if (i_size >> PAGE_SHIFT >= index) {
++ if (i_size >> PAGE_SHIFT >= aligned_index) {
+ mask |= within_size_orders;
+ break;
+ }
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 28ba2b06fc7dc2..67a680e4b484d7 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -374,7 +374,14 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
+ if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
+ nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
+ zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
+-
++ /*
++ * If there are no reclaimable file-backed or anonymous pages,
++ * ensure zones with sufficient free pages are not skipped.
++ * This prevents zones like DMA32 from being ignored in reclaim
++ * scenarios where they can still help alleviate memory pressure.
++ */
++ if (nr == 0)
++ nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
+ return nr;
+ }
+
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 72439764186ed2..b5553c08e73162 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -57,7 +57,6 @@ DEFINE_RWLOCK(hci_dev_list_lock);
+
+ /* HCI callback list */
+ LIST_HEAD(hci_cb_list);
+-DEFINE_MUTEX(hci_cb_list_lock);
+
+ /* HCI ID Numbering */
+ static DEFINE_IDA(hci_index_ida);
+@@ -2993,9 +2992,7 @@ int hci_register_cb(struct hci_cb *cb)
+ {
+ BT_DBG("%p name %s", cb, cb->name);
+
+- mutex_lock(&hci_cb_list_lock);
+- list_add_tail(&cb->list, &hci_cb_list);
+- mutex_unlock(&hci_cb_list_lock);
++ list_add_tail_rcu(&cb->list, &hci_cb_list);
+
+ return 0;
+ }
+@@ -3005,9 +3002,8 @@ int hci_unregister_cb(struct hci_cb *cb)
+ {
+ BT_DBG("%p name %s", cb, cb->name);
+
+- mutex_lock(&hci_cb_list_lock);
+- list_del(&cb->list);
+- mutex_unlock(&hci_cb_list_lock);
++ list_del_rcu(&cb->list);
++ synchronize_rcu();
+
+ return 0;
+ }
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 644b606743e212..bda2f2da7d7311 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -2137,6 +2137,11 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ return HCI_LM_ACCEPT;
+ }
+
++static bool iso_match(struct hci_conn *hcon)
++{
++ return hcon->type == ISO_LINK || hcon->type == LE_LINK;
++}
++
+ static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
+ {
+ if (hcon->type != ISO_LINK) {
+@@ -2318,6 +2323,7 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+
+ static struct hci_cb iso_cb = {
+ .name = "ISO",
++ .match = iso_match,
+ .connect_cfm = iso_connect_cfm,
+ .disconn_cfm = iso_disconn_cfm,
+ };
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 6544c1ed714344..27b4c4a2ba1fdd 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -7217,6 +7217,11 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
+ return NULL;
+ }
+
++static bool l2cap_match(struct hci_conn *hcon)
++{
++ return hcon->type == ACL_LINK || hcon->type == LE_LINK;
++}
++
+ static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+ {
+ struct hci_dev *hdev = hcon->hdev;
+@@ -7224,9 +7229,6 @@ static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+ struct l2cap_chan *pchan;
+ u8 dst_type;
+
+- if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
+- return;
+-
+ BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
+
+ if (status) {
+@@ -7291,9 +7293,6 @@ int l2cap_disconn_ind(struct hci_conn *hcon)
+
+ static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+ {
+- if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
+- return;
+-
+ BT_DBG("hcon %p reason %d", hcon, reason);
+
+ l2cap_conn_del(hcon, bt_to_errno(reason));
+@@ -7572,6 +7571,7 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+
+ static struct hci_cb l2cap_cb = {
+ .name = "L2CAP",
++ .match = l2cap_match,
+ .connect_cfm = l2cap_connect_cfm,
+ .disconn_cfm = l2cap_disconn_cfm,
+ .security_cfm = l2cap_security_cfm,
+diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
+index ad5177e3a69b77..4c56ca5a216c6f 100644
+--- a/net/bluetooth/rfcomm/core.c
++++ b/net/bluetooth/rfcomm/core.c
+@@ -2134,6 +2134,11 @@ static int rfcomm_run(void *unused)
+ return 0;
+ }
+
++static bool rfcomm_match(struct hci_conn *hcon)
++{
++ return hcon->type == ACL_LINK;
++}
++
+ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
+ {
+ struct rfcomm_session *s;
+@@ -2180,6 +2185,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
+
+ static struct hci_cb rfcomm_cb = {
+ .name = "RFCOMM",
++ .match = rfcomm_match,
+ .security_cfm = rfcomm_security_cfm
+ };
+
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index b872a2ca3ff38b..071c404c790af9 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -1355,11 +1355,13 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ return lm;
+ }
+
+-static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
++static bool sco_match(struct hci_conn *hcon)
+ {
+- if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
+- return;
++ return hcon->type == SCO_LINK || hcon->type == ESCO_LINK;
++}
+
++static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
++{
+ BT_DBG("hcon %p bdaddr %pMR status %u", hcon, &hcon->dst, status);
+
+ if (!status) {
+@@ -1374,9 +1376,6 @@ static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+
+ static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+ {
+- if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
+- return;
+-
+ BT_DBG("hcon %p reason %d", hcon, reason);
+
+ sco_conn_del(hcon, bt_to_errno(reason));
+@@ -1402,6 +1401,7 @@ void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+
+ static struct hci_cb sco_cb = {
+ .name = "SCO",
++ .match = sco_match,
+ .connect_cfm = sco_connect_cfm,
+ .disconn_cfm = sco_disconn_cfm,
+ };
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 8453e14d301b63..f3fa8353d262b0 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3640,8 +3640,10 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
+
+ if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) &&
+- skb_network_header_len(skb) != sizeof(struct ipv6hdr))
++ skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
++ !ipv6_has_hopopt_jumbo(skb))
+ goto sw_checksum;
++
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ case offsetof(struct udphdr, check):
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 55495063621d6c..54a53fae9e98f5 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -7918,42 +7918,37 @@ static const struct bpf_func_proto bpf_tcp_raw_check_syncookie_ipv6_proto = {
+
+ #endif /* CONFIG_INET */
+
+-bool bpf_helper_changes_pkt_data(void *func)
+-{
+- if (func == bpf_skb_vlan_push ||
+- func == bpf_skb_vlan_pop ||
+- func == bpf_skb_store_bytes ||
+- func == bpf_skb_change_proto ||
+- func == bpf_skb_change_head ||
+- func == sk_skb_change_head ||
+- func == bpf_skb_change_tail ||
+- func == sk_skb_change_tail ||
+- func == bpf_skb_adjust_room ||
+- func == sk_skb_adjust_room ||
+- func == bpf_skb_pull_data ||
+- func == sk_skb_pull_data ||
+- func == bpf_clone_redirect ||
+- func == bpf_l3_csum_replace ||
+- func == bpf_l4_csum_replace ||
+- func == bpf_xdp_adjust_head ||
+- func == bpf_xdp_adjust_meta ||
+- func == bpf_msg_pull_data ||
+- func == bpf_msg_push_data ||
+- func == bpf_msg_pop_data ||
+- func == bpf_xdp_adjust_tail ||
+-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
+- func == bpf_lwt_seg6_store_bytes ||
+- func == bpf_lwt_seg6_adjust_srh ||
+- func == bpf_lwt_seg6_action ||
+-#endif
+-#ifdef CONFIG_INET
+- func == bpf_sock_ops_store_hdr_opt ||
+-#endif
+- func == bpf_lwt_in_push_encap ||
+- func == bpf_lwt_xmit_push_encap)
++bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
++{
++ switch (func_id) {
++ case BPF_FUNC_clone_redirect:
++ case BPF_FUNC_l3_csum_replace:
++ case BPF_FUNC_l4_csum_replace:
++ case BPF_FUNC_lwt_push_encap:
++ case BPF_FUNC_lwt_seg6_action:
++ case BPF_FUNC_lwt_seg6_adjust_srh:
++ case BPF_FUNC_lwt_seg6_store_bytes:
++ case BPF_FUNC_msg_pop_data:
++ case BPF_FUNC_msg_pull_data:
++ case BPF_FUNC_msg_push_data:
++ case BPF_FUNC_skb_adjust_room:
++ case BPF_FUNC_skb_change_head:
++ case BPF_FUNC_skb_change_proto:
++ case BPF_FUNC_skb_change_tail:
++ case BPF_FUNC_skb_pull_data:
++ case BPF_FUNC_skb_store_bytes:
++ case BPF_FUNC_skb_vlan_pop:
++ case BPF_FUNC_skb_vlan_push:
++ case BPF_FUNC_store_hdr_opt:
++ case BPF_FUNC_xdp_adjust_head:
++ case BPF_FUNC_xdp_adjust_meta:
++ case BPF_FUNC_xdp_adjust_tail:
++ /* a tail-called program could call any of the above */
++ case BPF_FUNC_tail_call:
+ return true;
+-
+- return false;
++ default:
++ return false;
++ }
+ }
+
+ const struct bpf_func_proto bpf_event_output_data_proto __weak;
+diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
+index 7ce22f40db5b04..d58270b48cb2cf 100644
+--- a/net/core/netdev-genl.c
++++ b/net/core/netdev-genl.c
+@@ -228,8 +228,12 @@ int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info)
+ rcu_read_unlock();
+ rtnl_unlock();
+
+- if (err)
++ if (err) {
++ goto err_free_msg;
++ } else if (!rsp->len) {
++ err = -ENOENT;
+ goto err_free_msg;
++ }
+
+ return genlmsg_reply(rsp, info);
+
+diff --git a/net/core/sock.c b/net/core/sock.c
+index da50df485090ff..a83f64a1d96a29 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1300,7 +1300,10 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
+ sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
+ break;
+ case SO_REUSEPORT:
+- sk->sk_reuseport = valbool;
++ if (valbool && !sk_is_inet(sk))
++ ret = -EOPNOTSUPP;
++ else
++ sk->sk_reuseport = valbool;
+ break;
+ case SO_DONTROUTE:
+ sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 25505f9b724c33..09b73acf037ae2 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -294,7 +294,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
+
+ ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
+ iph->saddr, tunnel->parms.o_key,
+- iph->tos & INET_DSCP_MASK, dev_net(dev),
++ iph->tos & INET_DSCP_MASK, tunnel->net,
+ tunnel->parms.link, tunnel->fwmark, 0, 0);
+ rt = ip_route_output_key(tunnel->net, &fl4);
+
+@@ -611,7 +611,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ }
+ ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
+ tunnel_id_to_key32(key->tun_id),
+- tos & INET_DSCP_MASK, dev_net(dev), 0, skb->mark,
++ tos & INET_DSCP_MASK, tunnel->net, 0, skb->mark,
+ skb_get_hash(skb), key->flow_flags);
+
+ if (!tunnel_hlen)
+@@ -774,7 +774,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+
+ ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
+ tunnel->parms.o_key, tos & INET_DSCP_MASK,
+- dev_net(dev), READ_ONCE(tunnel->parms.link),
++ tunnel->net, READ_ONCE(tunnel->parms.link),
+ tunnel->fwmark, skb_get_hash(skb), 0);
+
+ if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0)
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 2d844e1f867f0a..2d43b29da15e20 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -7328,6 +7328,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
+ req->timeout))) {
+ reqsk_free(req);
++ dst_release(dst);
+ return 0;
+ }
+
+diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
+index 534a4498e280d7..fff09f5a796a75 100644
+--- a/net/ipv6/ila/ila_xlat.c
++++ b/net/ipv6/ila/ila_xlat.c
+@@ -200,6 +200,8 @@ static const struct nf_hook_ops ila_nf_hook_ops[] = {
+ },
+ };
+
++static DEFINE_MUTEX(ila_mutex);
++
+ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
+ {
+ struct ila_net *ilan = net_generic(net, ila_net_id);
+@@ -207,16 +209,20 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
+ spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
+ int err = 0, order;
+
+- if (!ilan->xlat.hooks_registered) {
++ if (!READ_ONCE(ilan->xlat.hooks_registered)) {
+ /* We defer registering net hooks in the namespace until the
+ * first mapping is added.
+ */
+- err = nf_register_net_hooks(net, ila_nf_hook_ops,
+- ARRAY_SIZE(ila_nf_hook_ops));
++ mutex_lock(&ila_mutex);
++ if (!ilan->xlat.hooks_registered) {
++ err = nf_register_net_hooks(net, ila_nf_hook_ops,
++ ARRAY_SIZE(ila_nf_hook_ops));
++ if (!err)
++ WRITE_ONCE(ilan->xlat.hooks_registered, true);
++ }
++ mutex_unlock(&ila_mutex);
+ if (err)
+ return err;
+-
+- ilan->xlat.hooks_registered = true;
+ }
+
+ ila = kzalloc(sizeof(*ila), GFP_KERNEL);
+diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
+index 51bccfb00a9cd9..61b0159b2fbee6 100644
+--- a/net/llc/llc_input.c
++++ b/net/llc/llc_input.c
+@@ -124,8 +124,8 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
+ if (unlikely(!pskb_may_pull(skb, llc_len)))
+ return 0;
+
+- skb->transport_header += llc_len;
+ skb_pull(skb, llc_len);
++ skb_reset_transport_header(skb);
+ if (skb->protocol == htons(ETH_P_802_2)) {
+ __be16 pdulen;
+ s32 data_size;
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 1b1bf044378d48..f11fd360b422dd 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -4992,10 +4992,16 @@ static void ieee80211_del_intf_link(struct wiphy *wiphy,
+ unsigned int link_id)
+ {
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
++ u16 new_links = wdev->valid_links & ~BIT(link_id);
+
+ lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
+- ieee80211_vif_set_links(sdata, wdev->valid_links, 0);
++ /* During the link teardown process, certain functions require the
++ * link_id to remain in the valid_links bitmap. Therefore, instead
++ * of removing the link_id from the bitmap, pass a masked value so
++ * it appears as though the link_id no longer exists.
++ */
++ ieee80211_vif_set_links(sdata, new_links, 0);
+ }
+
+ static int
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index 640239f4425b16..50eb1d8cd43deb 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -1157,14 +1157,14 @@ void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
+ u64 changed)
+ {
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+- unsigned long bits = changed;
++ unsigned long bits[] = { BITMAP_FROM_U64(changed) };
+ u32 bit;
+
+- if (!bits)
++ if (!changed)
+ return;
+
+ /* if we race with running work, worst case this work becomes a noop */
+- for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE)
++ for_each_set_bit(bit, bits, sizeof(changed) * BITS_PER_BYTE)
+ set_bit(bit, ifmsh->mbss_changed);
+ set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags);
+ wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index b4814e97cf7422..38c30e4ddda98c 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -1825,6 +1825,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+ WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
+ else
+ WARN(1, "Hardware became unavailable during restart.\n");
++ ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
++ IEEE80211_QUEUE_STOP_REASON_SUSPEND,
++ false);
+ ieee80211_handle_reconfig_failure(local);
+ return res;
+ }
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 1603b3702e2207..a62bc874bf1e17 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -667,8 +667,15 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+ &echo, &drop_other_suboptions))
+ return false;
+
++ /*
++ * Later on, mptcp_write_options() will enforce mutual exclusion with
++ * DSS; bail out if that option is set and we can't drop it.
++ */
+ if (drop_other_suboptions)
+ remaining += opt_size;
++ else if (opts->suboptions & OPTION_MPTCP_DSS)
++ return false;
++
+ len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port);
+ if (remaining < len)
+ return false;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 8a8e8fee337f5e..4b9d850ce85a25 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -528,13 +528,13 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
+ mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
+ }
+
+-static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
++static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
+ {
+ bool slow;
+
+ slow = lock_sock_fast(ssk);
+ if (tcp_can_send_ack(ssk))
+- tcp_cleanup_rbuf(ssk, 1);
++ tcp_cleanup_rbuf(ssk, copied);
+ unlock_sock_fast(ssk, slow);
+ }
+
+@@ -551,7 +551,7 @@ static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
+ (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
+ }
+
+-static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
++static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
+ {
+ int old_space = READ_ONCE(msk->old_wspace);
+ struct mptcp_subflow_context *subflow;
+@@ -559,14 +559,14 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
+ int space = __mptcp_space(sk);
+ bool cleanup, rx_empty;
+
+- cleanup = (space > 0) && (space >= (old_space << 1));
+- rx_empty = !__mptcp_rmem(sk);
++ cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
++ rx_empty = !__mptcp_rmem(sk) && copied;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
+- mptcp_subflow_cleanup_rbuf(ssk);
++ mptcp_subflow_cleanup_rbuf(ssk, copied);
+ }
+ }
+
+@@ -1939,6 +1939,8 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ goto out;
+ }
+
++static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
++
+ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
+ struct msghdr *msg,
+ size_t len, int flags,
+@@ -1992,6 +1994,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
+ break;
+ }
+
++ mptcp_rcv_space_adjust(msk, copied);
+ return copied;
+ }
+
+@@ -2217,9 +2220,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+
+ copied += bytes_read;
+
+- /* be sure to advertise window change */
+- mptcp_cleanup_rbuf(msk);
+-
+ if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
+ continue;
+
+@@ -2268,7 +2268,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ }
+
+ pr_debug("block timeout %ld\n", timeo);
+- mptcp_rcv_space_adjust(msk, copied);
++ mptcp_cleanup_rbuf(msk, copied);
+ err = sk_wait_data(sk, &timeo, NULL);
+ if (err < 0) {
+ err = copied ? : err;
+@@ -2276,7 +2276,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ }
+ }
+
+- mptcp_rcv_space_adjust(msk, copied);
++ mptcp_cleanup_rbuf(msk, copied);
+
+ out_err:
+ if (cmsg_flags && copied >= 0) {
+diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
+index 2b5e246b8d9a7a..b94cb2ffbaf8fa 100644
+--- a/net/netrom/nr_route.c
++++ b/net/netrom/nr_route.c
+@@ -754,6 +754,12 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ int ret;
+ struct sk_buff *skbn;
+
++ /*
++ * Reject malformed packets early. Check that the packet contains at
++ * least two addresses plus one more byte for the Time-To-Live field.
++ */
++ if (skb->len < 2 * sizeof(ax25_address) + 1)
++ return 0;
+
+ nr_src = (ax25_address *)(skb->data + 0);
+ nr_dest = (ax25_address *)(skb->data + 7);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 97774bd4b6cb11..f3cecb3e4bcb18 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -538,10 +538,8 @@ static void *packet_current_frame(struct packet_sock *po,
+ return packet_lookup_frame(po, rb, rb->head, status);
+ }
+
+-static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
++static u16 vlan_get_tci(const struct sk_buff *skb, struct net_device *dev)
+ {
+- u8 *skb_orig_data = skb->data;
+- int skb_orig_len = skb->len;
+ struct vlan_hdr vhdr, *vh;
+ unsigned int header_len;
+
+@@ -562,33 +560,21 @@ static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
+ else
+ return 0;
+
+- skb_push(skb, skb->data - skb_mac_header(skb));
+- vh = skb_header_pointer(skb, header_len, sizeof(vhdr), &vhdr);
+- if (skb_orig_data != skb->data) {
+- skb->data = skb_orig_data;
+- skb->len = skb_orig_len;
+- }
++ vh = skb_header_pointer(skb, skb_mac_offset(skb) + header_len,
++ sizeof(vhdr), &vhdr);
+ if (unlikely(!vh))
+ return 0;
+
+ return ntohs(vh->h_vlan_TCI);
+ }
+
+-static __be16 vlan_get_protocol_dgram(struct sk_buff *skb)
++static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
+ {
+ __be16 proto = skb->protocol;
+
+- if (unlikely(eth_type_vlan(proto))) {
+- u8 *skb_orig_data = skb->data;
+- int skb_orig_len = skb->len;
+-
+- skb_push(skb, skb->data - skb_mac_header(skb));
+- proto = __vlan_get_protocol(skb, proto, NULL);
+- if (skb_orig_data != skb->data) {
+- skb->data = skb_orig_data;
+- skb->len = skb_orig_len;
+- }
+- }
++ if (unlikely(eth_type_vlan(proto)))
++ proto = __vlan_get_protocol_offset(skb, proto,
++ skb_mac_offset(skb), NULL);
+
+ return proto;
+ }
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index c45c192b787873..0b0794f164cf2e 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -137,7 +137,8 @@ static struct sctp_association *sctp_association_init(
+ = 5 * asoc->rto_max;
+
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
+- asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
++ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
++ (unsigned long)sp->autoclose * HZ;
+
+ /* Initializes the timers */
+ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index f49b55724f8341..18585b1416c662 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -2843,10 +2843,9 @@ void cfg80211_remove_link(struct wireless_dev *wdev, unsigned int link_id)
+ break;
+ }
+
+- wdev->valid_links &= ~BIT(link_id);
+-
+ rdev_del_intf_link(rdev, wdev, link_id);
+
++ wdev->valid_links &= ~BIT(link_id);
+ eth_zero_addr(wdev->links[link_id].addr);
+ }
+
+diff --git a/scripts/mksysmap b/scripts/mksysmap
+index c12723a0465562..3accbdb269ac70 100755
+--- a/scripts/mksysmap
++++ b/scripts/mksysmap
+@@ -26,7 +26,7 @@
+ # (do not forget a space before each pattern)
+
+ # local symbols for ARM, MIPS, etc.
+-/ \\$/d
++/ \$/d
+
+ # local labels, .LBB, .Ltmpxxx, .L__unnamed_xx, .LASANPC, etc.
+ / \.L/d
+@@ -39,7 +39,7 @@
+ / __pi_\.L/d
+
+ # arm64 local symbols in non-VHE KVM namespace
+-/ __kvm_nvhe_\\$/d
++/ __kvm_nvhe_\$/d
+ / __kvm_nvhe_\.L/d
+
+ # lld arm/aarch64/mips thunks
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 634e40748287c0..721e0e9f17cada 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -742,7 +742,7 @@ static void do_input(char *alias,
+
+ for (i = min / BITS_PER_LONG; i < max / BITS_PER_LONG + 1; i++)
+ arr[i] = TO_NATIVE(arr[i]);
+- for (i = min; i < max; i++)
++ for (i = min; i <= max; i++)
+ if (arr[i / BITS_PER_LONG] & (1ULL << (i%BITS_PER_LONG)))
+ sprintf(alias + strlen(alias), "%X,*", i);
+ }
+diff --git a/scripts/package/PKGBUILD b/scripts/package/PKGBUILD
+index f83493838cf96a..dca706617adc76 100644
+--- a/scripts/package/PKGBUILD
++++ b/scripts/package/PKGBUILD
+@@ -103,7 +103,7 @@ _package-headers() {
+
+ _package-api-headers() {
+ pkgdesc="Kernel headers sanitized for use in userspace"
+- provides=(linux-api-headers)
++ provides=(linux-api-headers="${pkgver}")
+ conflicts=(linux-api-headers)
+
+ _prologue
+diff --git a/scripts/sorttable.h b/scripts/sorttable.h
+index 7bd0184380d3b9..a7c5445baf0027 100644
+--- a/scripts/sorttable.h
++++ b/scripts/sorttable.h
+@@ -110,7 +110,7 @@ static inline unsigned long orc_ip(const int *ip)
+
+ static int orc_sort_cmp(const void *_a, const void *_b)
+ {
+- struct orc_entry *orc_a;
++ struct orc_entry *orc_a, *orc_b;
+ const int *a = g_orc_ip_table + *(int *)_a;
+ const int *b = g_orc_ip_table + *(int *)_b;
+ unsigned long a_val = orc_ip(a);
+@@ -128,6 +128,9 @@ static int orc_sort_cmp(const void *_a, const void *_b)
+ * whitelisted .o files which didn't get objtool generation.
+ */
+ orc_a = g_orc_table + (a - g_orc_ip_table);
++ orc_b = g_orc_table + (b - g_orc_ip_table);
++ if (orc_a->type == ORC_TYPE_UNDEFINED && orc_b->type == ORC_TYPE_UNDEFINED)
++ return 0;
+ return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
+ }
+
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index a9830fbfc5c66c..88850405ded929 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -955,7 +955,10 @@ void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
+ xpermd->driver))
+ return;
+ } else {
+- BUG();
++ pr_warn_once(
++ "SELinux: unknown extended permission (%u) will be ignored\n",
++ node->datum.u.xperms->specified);
++ return;
+ }
+
+ if (node->key.specified == AVTAB_XPERMS_ALLOWED) {
+@@ -992,7 +995,8 @@ void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
+ node->datum.u.xperms->perms.p[i];
+ }
+ } else {
+- BUG();
++ pr_warn_once("SELinux: unknown specified key (%u)\n",
++ node->key.specified);
+ }
+ }
+
+diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
+index e3394919daa09a..51ee4c00a84310 100644
+--- a/sound/core/seq/oss/seq_oss_synth.c
++++ b/sound/core/seq/oss/seq_oss_synth.c
+@@ -66,6 +66,7 @@ static struct seq_oss_synth midi_synth_dev = {
+ };
+
+ static DEFINE_SPINLOCK(register_lock);
++static DEFINE_MUTEX(sysex_mutex);
+
+ /*
+ * prototypes
+@@ -497,6 +498,7 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
+ if (!info)
+ return -ENXIO;
+
++ guard(mutex)(&sysex_mutex);
+ sysex = info->sysex;
+ if (sysex == NULL) {
+ sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 3930e2f9082f42..77b6ac9b5c11bc 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1275,10 +1275,16 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
+ if (client->type != client_info->type)
+ return -EINVAL;
+
+- /* check validity of midi_version field */
+- if (client->user_pversion >= SNDRV_PROTOCOL_VERSION(1, 0, 3) &&
+- client_info->midi_version > SNDRV_SEQ_CLIENT_UMP_MIDI_2_0)
+- return -EINVAL;
++ if (client->user_pversion >= SNDRV_PROTOCOL_VERSION(1, 0, 3)) {
++ /* check validity of midi_version field */
++ if (client_info->midi_version > SNDRV_SEQ_CLIENT_UMP_MIDI_2_0)
++ return -EINVAL;
++
++ /* check if UMP is supported in kernel */
++ if (!IS_ENABLED(CONFIG_SND_SEQ_UMP) &&
++ client_info->midi_version > 0)
++ return -EINVAL;
++ }
+
+ /* fill the info fields */
+ if (client_info->name[0])
+diff --git a/sound/core/ump.c b/sound/core/ump.c
+index bd26bb2210cbd4..abc537d54b7312 100644
+--- a/sound/core/ump.c
++++ b/sound/core/ump.c
+@@ -1244,7 +1244,7 @@ static int fill_legacy_mapping(struct snd_ump_endpoint *ump)
+
+ num = 0;
+ for (i = 0; i < SNDRV_UMP_MAX_GROUPS; i++)
+- if ((group_maps & (1U << i)) && ump->groups[i].valid)
++ if (group_maps & (1U << i))
+ ump->legacy_mapping[num++] = i;
+
+ return num;
+diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
+index e3ac0e23ae3211..7baf3b506eefec 100644
+--- a/sound/pci/hda/cs35l56_hda.c
++++ b/sound/pci/hda/cs35l56_hda.c
+@@ -151,10 +151,6 @@ static int cs35l56_hda_runtime_resume(struct device *dev)
+ }
+ }
+
+- ret = cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
+- if (ret)
+- goto err;
+-
+ return 0;
+
+ err:
+@@ -1059,9 +1055,6 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int hid, int id)
+
+ regmap_multi_reg_write(cs35l56->base.regmap, cs35l56_hda_dai_config,
+ ARRAY_SIZE(cs35l56_hda_dai_config));
+- ret = cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
+- if (ret)
+- goto dsp_err;
+
+ /*
+ * By default only enable one ASP1TXn, where n=amplifier index,
+@@ -1087,7 +1080,6 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int hid, int id)
+
+ pm_err:
+ pm_runtime_disable(cs35l56->base.dev);
+-dsp_err:
+ cs_dsp_remove(&cs35l56->cs_dsp);
+ err:
+ gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 0);
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index e4673a71551a3b..d40197fb5fbd58 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -1134,7 +1134,6 @@ struct ca0132_spec {
+
+ struct hda_codec *codec;
+ struct delayed_work unsol_hp_work;
+- int quirk;
+
+ #ifdef ENABLE_TUNING_CONTROLS
+ long cur_ctl_vals[TUNING_CTLS_COUNT];
+@@ -1166,7 +1165,6 @@ struct ca0132_spec {
+ * CA0132 quirks table
+ */
+ enum {
+- QUIRK_NONE,
+ QUIRK_ALIENWARE,
+ QUIRK_ALIENWARE_M17XR4,
+ QUIRK_SBZ,
+@@ -1176,10 +1174,11 @@ enum {
+ QUIRK_R3D,
+ QUIRK_AE5,
+ QUIRK_AE7,
++ QUIRK_NONE = HDA_FIXUP_ID_NOT_SET,
+ };
+
+ #ifdef CONFIG_PCI
+-#define ca0132_quirk(spec) ((spec)->quirk)
++#define ca0132_quirk(spec) ((spec)->codec->fixup_id)
+ #define ca0132_use_pci_mmio(spec) ((spec)->use_pci_mmio)
+ #define ca0132_use_alt_functions(spec) ((spec)->use_alt_functions)
+ #define ca0132_use_alt_controls(spec) ((spec)->use_alt_controls)
+@@ -1293,7 +1292,7 @@ static const struct hda_pintbl ae7_pincfgs[] = {
+ {}
+ };
+
+-static const struct snd_pci_quirk ca0132_quirks[] = {
++static const struct hda_quirk ca0132_quirks[] = {
+ SND_PCI_QUIRK(0x1028, 0x057b, "Alienware M17x R4", QUIRK_ALIENWARE_M17XR4),
+ SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
+ SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
+@@ -1316,6 +1315,19 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
+ {}
+ };
+
++static const struct hda_model_fixup ca0132_quirk_models[] = {
++ { .id = QUIRK_ALIENWARE, .name = "alienware" },
++ { .id = QUIRK_ALIENWARE_M17XR4, .name = "alienware-m17xr4" },
++ { .id = QUIRK_SBZ, .name = "sbz" },
++ { .id = QUIRK_ZXR, .name = "zxr" },
++ { .id = QUIRK_ZXR_DBPRO, .name = "zxr-dbpro" },
++ { .id = QUIRK_R3DI, .name = "r3di" },
++ { .id = QUIRK_R3D, .name = "r3d" },
++ { .id = QUIRK_AE5, .name = "ae5" },
++ { .id = QUIRK_AE7, .name = "ae7" },
++ {}
++};
++
+ /* Output selection quirk info structures. */
+ #define MAX_QUIRK_MMIO_GPIO_SET_VALS 3
+ #define MAX_QUIRK_SCP_SET_VALS 2
+@@ -9957,17 +9969,15 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
+ */
+ static void sbz_detect_quirk(struct hda_codec *codec)
+ {
+- struct ca0132_spec *spec = codec->spec;
+-
+ switch (codec->core.subsystem_id) {
+ case 0x11020033:
+- spec->quirk = QUIRK_ZXR;
++ codec->fixup_id = QUIRK_ZXR;
+ break;
+ case 0x1102003f:
+- spec->quirk = QUIRK_ZXR_DBPRO;
++ codec->fixup_id = QUIRK_ZXR_DBPRO;
+ break;
+ default:
+- spec->quirk = QUIRK_SBZ;
++ codec->fixup_id = QUIRK_SBZ;
+ break;
+ }
+ }
+@@ -9976,7 +9986,6 @@ static int patch_ca0132(struct hda_codec *codec)
+ {
+ struct ca0132_spec *spec;
+ int err;
+- const struct snd_pci_quirk *quirk;
+
+ codec_dbg(codec, "patch_ca0132\n");
+
+@@ -9987,11 +9996,7 @@ static int patch_ca0132(struct hda_codec *codec)
+ spec->codec = codec;
+
+ /* Detect codec quirk */
+- quirk = snd_pci_quirk_lookup(codec->bus->pci, ca0132_quirks);
+- if (quirk)
+- spec->quirk = quirk->value;
+- else
+- spec->quirk = QUIRK_NONE;
++ snd_hda_pick_fixup(codec, ca0132_quirk_models, ca0132_quirks, NULL);
+ if (ca0132_quirk(spec) == QUIRK_SBZ)
+ sbz_detect_quirk(codec);
+
+@@ -10068,7 +10073,7 @@ static int patch_ca0132(struct hda_codec *codec)
+ spec->mem_base = pci_iomap(codec->bus->pci, 2, 0xC20);
+ if (spec->mem_base == NULL) {
+ codec_warn(codec, "pci_iomap failed! Setting quirk to QUIRK_NONE.");
+- spec->quirk = QUIRK_NONE;
++ codec->fixup_id = QUIRK_NONE;
+ }
+ }
+ #endif
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 192fc75b51e6db..3ed82f98e2de9e 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7704,6 +7704,7 @@ enum {
+ ALC274_FIXUP_HP_MIC,
+ ALC274_FIXUP_HP_HEADSET_MIC,
+ ALC274_FIXUP_HP_ENVY_GPIO,
++ ALC274_FIXUP_ASUS_ZEN_AIO_27,
+ ALC256_FIXUP_ASUS_HPE,
+ ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
+ ALC287_FIXUP_HP_GPIO_LED,
+@@ -9505,6 +9506,26 @@ static const struct hda_fixup alc269_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc274_fixup_hp_envy_gpio,
+ },
++ [ALC274_FIXUP_ASUS_ZEN_AIO_27] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x10 },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0xc420 },
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x40 },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x8800 },
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x49 },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0249 },
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x4a },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x202b },
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x62 },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0xa007 },
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x6b },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x5060 },
++ {}
++ },
++ .chained = true,
++ .chain_id = ALC2XX_FIXUP_HEADSET_MIC,
++ },
+ [ALC256_FIXUP_ASUS_HPE] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+@@ -10615,6 +10636,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
++ SND_PCI_QUIRK(0x1043, 0x31d0, "ASUS Zen AIO 27 Z272SD_A272SD", ALC274_FIXUP_ASUS_ZEN_AIO_27),
+ SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+@@ -10971,6 +10993,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0xf111, 0x000c, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+
+ #if 0
+ /* Below is a quirk table taken from the old code.
+@@ -11162,6 +11185,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
+ {.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
+ {.id = ALC236_FIXUP_LENOVO_INV_DMIC, .name = "alc236-fixup-lenovo-inv-mic"},
++ {.id = ALC2XX_FIXUP_HEADSET_MIC, .name = "alc2xx-fixup-headset-mic"},
+ {}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/soc/generic/audio-graph-card2.c b/sound/soc/generic/audio-graph-card2.c
+index 93eee40cec760c..63837e25965956 100644
+--- a/sound/soc/generic/audio-graph-card2.c
++++ b/sound/soc/generic/audio-graph-card2.c
+@@ -779,7 +779,7 @@ static void graph_link_init(struct simple_util_priv *priv,
+ of_node_get(port_codec);
+ if (graph_lnk_is_multi(port_codec)) {
+ ep_codec = graph_get_next_multi_ep(&port_codec);
+- of_node_put(port_cpu);
++ of_node_put(port_codec);
+ port_codec = ep_to_port(ep_codec);
+ } else {
+ ep_codec = port_to_endpoint(port_codec);
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 0cbf1d4fbe6edd..6049d957694ca6 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -60,6 +60,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
+ pcm_formats |= SNDRV_PCM_FMTBIT_SPECIAL;
+ /* flag potentially raw DSD capable altsettings */
+ fp->dsd_raw = true;
++ /* clear special format bit to avoid "unsupported format" msg below */
++ format &= ~UAC2_FORMAT_TYPE_I_RAW_DATA;
+ }
+
+ format <<= 1;
+@@ -71,8 +73,11 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
+ sample_width = as->bBitResolution;
+ sample_bytes = as->bSubslotSize;
+
+- if (format & UAC3_FORMAT_TYPE_I_RAW_DATA)
++ if (format & UAC3_FORMAT_TYPE_I_RAW_DATA) {
+ pcm_formats |= SNDRV_PCM_FMTBIT_SPECIAL;
++ /* clear special format bit to avoid "unsupported format" msg below */
++ format &= ~UAC3_FORMAT_TYPE_I_RAW_DATA;
++ }
+
+ format <<= 1;
+ break;
+diff --git a/sound/usb/mixer_us16x08.c b/sound/usb/mixer_us16x08.c
+index 6eb7d93b358d99..20ac32635f1f50 100644
+--- a/sound/usb/mixer_us16x08.c
++++ b/sound/usb/mixer_us16x08.c
+@@ -687,7 +687,7 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol,
+ struct usb_mixer_elem_info *elem = kcontrol->private_data;
+ struct snd_usb_audio *chip = elem->head.mixer->chip;
+ struct snd_us16x08_meter_store *store = elem->private_data;
+- u8 meter_urb[64];
++ u8 meter_urb[64] = {0};
+
+ switch (kcontrol->private_value) {
+ case 0: {
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index a0767de7f1b7ed..8ba0aff8be2ec2 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2325,6 +2325,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_DSD_RAW),
+ DEVICE_FLG(0x2522, 0x0007, /* LH Labs Geek Out HD Audio 1V5 */
+ QUIRK_FLAG_SET_IFACE_FIRST),
++ DEVICE_FLG(0x262a, 0x9302, /* ddHiFi TC44C */
++ QUIRK_FLAG_DSD_RAW),
+ DEVICE_FLG(0x2708, 0x0002, /* Audient iD14 */
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */
+diff --git a/tools/sched_ext/scx_central.c b/tools/sched_ext/scx_central.c
+index 21deea320bd785..e938156ed0a0d0 100644
+--- a/tools/sched_ext/scx_central.c
++++ b/tools/sched_ext/scx_central.c
+@@ -97,7 +97,7 @@ int main(int argc, char **argv)
+ SCX_BUG_ON(!cpuset, "Failed to allocate cpuset");
+ CPU_ZERO(cpuset);
+ CPU_SET(skel->rodata->central_cpu, cpuset);
+- SCX_BUG_ON(sched_setaffinity(0, sizeof(cpuset), cpuset),
++ SCX_BUG_ON(sched_setaffinity(0, sizeof(*cpuset), cpuset),
+ "Failed to affinitize to central CPU %d (max %d)",
+ skel->rodata->central_cpu, skel->rodata->nr_cpu_ids - 1);
+ CPU_FREE(cpuset);
+diff --git a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
+index 8a0632c37839a3..79f5087dade224 100644
+--- a/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
++++ b/tools/testing/selftests/bpf/progs/tc_bpf2bpf.c
+@@ -10,6 +10,8 @@ int subprog(struct __sk_buff *skb)
+ int ret = 1;
+
+ __sink(ret);
++ /* let the verifier know that 'subprog_tc' can change pointers to skb->data */
++ bpf_skb_change_proto(skb, 0, 0);
+ return ret;
+ }
+
+diff --git a/tools/testing/selftests/net/forwarding/local_termination.sh b/tools/testing/selftests/net/forwarding/local_termination.sh
+index c35548767756d0..ecd34f364125cb 100755
+--- a/tools/testing/selftests/net/forwarding/local_termination.sh
++++ b/tools/testing/selftests/net/forwarding/local_termination.sh
+@@ -7,7 +7,6 @@ ALL_TESTS="standalone vlan_unaware_bridge vlan_aware_bridge test_vlan \
+ NUM_NETIFS=2
+ PING_COUNT=1
+ REQUIRE_MTOOLS=yes
+-REQUIRE_MZ=no
+
+ source lib.sh
+