The following commit has been merged in the master branch:
commit e535aaad09f0e2c09dac41f75631af7efb5bdcea
Merge: 18d4faf4b82e57b5193451465859443db5390181 
7ea7694495db8ec2e80b601f967865263b44b16a
Author: Stephen Rothwell <[email protected]>
Date:   Fri Oct 31 09:33:15 2025 +1100

    Merge branch 'main' of 
https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git

diff --combined Documentation/devicetree/bindings/vendor-prefixes.yaml
index 003cc91fb02f8,424aa7b911a77..fa303ee1fe1fa
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@@ -20,7 -20,7 +20,7 @@@ patternProperties
    "^(keypad|m25p|max8952|max8997|max8998|mpmc),.*": true
    "^(pciclass|pinctrl-single|#pinctrl-single|PowerPC),.*": true
    "^(pl022|pxa-mmc|rcar_sound|rotary-encoder|s5m8767|sdhci),.*": true
-   "^(simple-audio-card|st-plgpio|st-spics|ts),.*": true
+   "^(simple-audio-card|st-plgpio|st-spics|ts|vsc8531),.*": true
    "^pool[0-3],.*": true
  
    # Keep list in alphabetical order.
@@@ -907,8 -907,6 +907,8 @@@
      description: Lincoln Technology Solutions
    "^lineartechnology,.*":
      description: Linear Technology
 +  "^linkease,.*":
 +    description: Shenzhen LinkEase Network Technology Co., Ltd.
    "^linksprite,.*":
      description: LinkSprite Technologies, Inc.
    "^linksys,.*":
diff --combined MAINTAINERS
index 5da5c59b4eabe,d652f4f27756e..06ea38b96dc1d
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -1997,10 -1997,6 +1997,10 @@@ F:    include/uapi/linux/if_arcnet.
  
  ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
  M:    Arnd Bergmann <[email protected]>
 +M:    Krzysztof Kozlowski <[email protected]>
 +M:    Alexandre Belloni <[email protected]>
 +M:    Linus Walleij <[email protected]>
 +R:    Drew Fustini <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
  L:    [email protected]
  S:    Maintained
@@@ -3300,6 -3296,7 +3300,7 @@@ F:      drivers/*/*/*rockchip
  F:    drivers/*/*rockchip*
  F:    drivers/clk/rockchip/
  F:    drivers/i2c/busses/i2c-rk3x.c
+ F:    drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
  F:    sound/soc/rockchip/
  N:    rockchip
  
@@@ -3441,6 -3438,7 +3442,6 @@@ F:      drivers/clocksource/clksrc_st_lpc.
  F:    drivers/cpufreq/sti-cpufreq.c
  F:    drivers/dma/st_fdma*
  F:    drivers/i2c/busses/i2c-st.c
 -F:    drivers/media/platform/st/sti/c8sectpfe/
  F:    drivers/media/rc/st_rc.c
  F:    drivers/mmc/host/sdhci-st.c
  F:    drivers/phy/st/phy-miphy28lp.c
@@@ -5128,7 -5126,6 +5129,6 @@@ F:      Documentation/devicetree/bindings/ne
  F:    drivers/net/ethernet/broadcom/genet/
  F:    drivers/net/ethernet/broadcom/unimac.h
  F:    drivers/net/mdio/mdio-bcm-unimac.c
- F:    include/linux/platform_data/bcmgenet.h
  F:    include/linux/platform_data/mdio-bcm-unimac.h
  
  BROADCOM IPROC ARM ARCHITECTURE
@@@ -7416,6 -7413,7 +7416,6 @@@ S:      Maintaine
  P:    Documentation/doc-guide/maintainer-profile.rst
  T:    git git://git.lwn.net/linux.git docs-next
  F:    Documentation/
 -F:    scripts/check-variable-fonts.sh
  F:    scripts/checktransupdate.py
  F:    scripts/documentation-file-ref-check
  F:    scripts/get_abi.py
@@@ -7424,6 -7422,7 +7424,6 @@@ F:      scripts/lib/abi/
  F:    scripts/lib/kdoc/*
  F:    tools/docs/*
  F:    tools/net/ynl/pyynl/lib/doc_generator.py
 -F:    scripts/sphinx-pre-install
  X:    Documentation/ABI/
  X:    Documentation/admin-guide/media/
  X:    Documentation/devicetree/
@@@ -7458,7 -7457,7 +7458,7 @@@ L:      [email protected]
  S:    Maintained
  F:    Documentation/sphinx/parse-headers.pl
  F:    scripts/documentation-file-ref-check
 -F:    scripts/sphinx-pre-install
 +F:    tools/docs/sphinx-pre-install
  
  DOCUMENTATION/ITALIAN
  M:    Federico Vaga <[email protected]>
@@@ -7481,7 -7480,7 +7481,7 @@@ F:      Documentation/devicetree/bindings/me
  F:    drivers/media/i2c/dw9714.c
  
  DONGWOON DW9719 LENS VOICE COIL DRIVER
 -M:    Daniel Scally <[email protected]>
 +M:    Daniel Scally <[email protected]>
  L:    [email protected]
  S:    Maintained
  T:    git git://linuxtv.org/media.git
@@@ -9183,9 -9182,6 +9183,9 @@@ S:      Maintaine
  F:    kernel/power/energy_model.c
  F:    include/linux/energy_model.h
  F:    Documentation/power/energy-model.rst
 +F:    Documentation/netlink/specs/em.yaml
 +F:    include/uapi/linux/energy_model.h
 +F:    kernel/power/em_netlink*.*
  
  EPAPR HYPERVISOR BYTE CHANNEL DEVICE DRIVER
  M:    Laurentiu Tudor <[email protected]>
@@@ -10533,7 -10529,7 +10533,7 @@@ L:   [email protected]
  S:    Supported
  B:    
https://bugzilla.kernel.org/enter_bug.cgi?product=File%20System&component=gfs2
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git
 -F:    Documentation/filesystems/gfs2*
 +F:    Documentation/filesystems/gfs2/
  F:    fs/gfs2/
  F:    include/uapi/linux/gfs2_ondisk.h
  
@@@ -10606,9 -10602,8 +10606,9 @@@ F:   Documentation/devicetree/bindings/cl
  F:    
Documentation/devicetree/bindings/soc/google/google,gs101-pmu-intr-gen.yaml
  F:    arch/arm64/boot/dts/exynos/google/
  F:    drivers/clk/samsung/clk-gs101.c
 +F:    drivers/soc/samsung/gs101-pmu.c
  F:    drivers/phy/samsung/phy-gs101-ufs.c
 -F:    include/dt-bindings/clock/google,gs101.h
 +F:    include/dt-bindings/clock/google,gs101*
  K:    [gG]oogle.?[tT]ensor
  
  GPD FAN DRIVER
@@@ -11581,7 -11576,7 +11581,7 @@@ T:   git git://linuxtv.org/media.gi
  F:    drivers/media/i2c/hi556.c
  
  HYNIX HI846 SENSOR DRIVER
 -M:    Martin Kepplinger <[email protected]>
 +M:    Martin Kepplinger-Novakovic <[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    drivers/media/i2c/hi846.c
@@@ -12619,7 -12614,7 +12619,7 @@@ INTEL IPU3 CSI-2 CIO2 DRIVE
  M:    Yong Zhi <[email protected]>
  M:    Sakari Ailus <[email protected]>
  M:    Bingbu Cao <[email protected]>
 -M:    Dan Scally <[email protected]>
 +M:    Dan Scally <[email protected]>
  R:    Tianshu Qiu <[email protected]>
  L:    [email protected]
  S:    Maintained
@@@ -12862,8 -12857,7 +12862,8 @@@ F:   tools/testing/selftests/sgx/
  K:    \bSGX_
  
  INTEL SKYLAKE INT3472 ACPI DEVICE DRIVER
 -M:    Daniel Scally <[email protected]>
 +M:    Daniel Scally <[email protected]>
 +M:    Sakari Ailus <[email protected]>
  S:    Maintained
  F:    drivers/platform/x86/intel/int3472/
  F:    include/linux/platform_data/x86/int3472.h
@@@ -13118,15 -13112,6 +13118,15 @@@ F: include/uapi/linux/io_uring.
  F:    include/uapi/linux/io_uring/
  F:    io_uring/
  
 +IO_URING ZCRX
 +M:    Pavel Begunkov <[email protected]>
 +L:    [email protected]
 +L:    [email protected]
 +T:    git https://github.com/isilence/linux.git zcrx/for-next
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux.git
 +S:    Maintained
 +F:    io_uring/zcrx.*
 +
  IPMI SUBSYSTEM
  M:    Corey Minyard <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
@@@ -13262,8 -13247,10 +13262,8 @@@ T:  git git://git.kernel.org/pub/scm/lin
  F:    drivers/infiniband/ulp/isert
  
  ISDN/CMTP OVER BLUETOOTH
 -M:    Karsten Keil <[email protected]>
 -L:    [email protected] (subscribers-only)
  L:    [email protected]
 -S:    Odd Fixes
 +S:    Orphan
  W:    http://www.isdn4linux.de
  F:    Documentation/isdn/
  F:    drivers/isdn/capi/
@@@ -13272,8 -13259,10 +13272,8 @@@ F:  include/uapi/linux/isdn
  F:    net/bluetooth/cmtp/
  
  ISDN/mISDN SUBSYSTEM
 -M:    Karsten Keil <[email protected]>
 -L:    [email protected] (subscribers-only)
  L:    [email protected]
 -S:    Maintained
 +S:    Orphan
  W:    http://www.isdn4linux.de
  F:    drivers/isdn/Kconfig
  F:    drivers/isdn/Makefile
@@@ -13427,12 -13416,9 +13427,12 @@@ F: mm/kasan
  F:    scripts/Makefile.kasan
  
  KCONFIG
 +M:    Nathan Chancellor <[email protected]>
 +M:    Nicolas Schier <[email protected]>
  L:    [email protected]
 -S:    Orphan
 +S:    Odd Fixes
  Q:    https://patchwork.kernel.org/project/linux-kbuild/list/
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/kbuild/linux.git
  F:    Documentation/kbuild/kconfig*
  F:    scripts/Kconfig.include
  F:    scripts/kconfig/
@@@ -13498,7 -13484,7 +13498,7 @@@ F:   fs/autofs
  
  KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
  M:    Nathan Chancellor <[email protected]>
 -M:    Nicolas Schier <[email protected]>
 +M:    Nicolas Schier <[email protected]>
  L:    [email protected]
  S:    Odd Fixes
  Q:    https://patchwork.kernel.org/project/linux-kbuild/list/
@@@ -13617,7 -13603,7 +13617,7 @@@ F:   fs/smb/server
  KERNEL UNIT TESTING FRAMEWORK (KUnit)
  M:    Brendan Higgins <[email protected]>
  M:    David Gow <[email protected]>
 -R:    Rae Moar <[email protected]>
 +R:    Rae Moar <[email protected]>
  L:    [email protected]
  L:    [email protected]
  S:    Maintained
@@@ -14409,7 -14395,6 +14409,7 @@@ F:   tools/memory-model
  
  LINUX-NEXT TREE
  M:    Stephen Rothwell <[email protected]>
 +M:    Mark Brown <[email protected]>
  L:    [email protected]
  S:    Supported
  B:    mailto:[email protected] and the appropriate development tree
@@@ -15182,15 -15167,6 +15182,15 @@@ S: Maintaine
  F:    Documentation/hwmon/max15301.rst
  F:    drivers/hwmon/pmbus/max15301.c
  
 +MAX17616 HARDWARE MONITOR DRIVER
 +M:    Kim Seer Paller <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +W:    https://ez.analog.com/linux-software-drivers
 +F:    Documentation/devicetree/bindings/hwmon/pmbus/adi,max17616.yaml
 +F:    Documentation/hwmon/max17616.rst
 +F:    drivers/hwmon/pmbus/max17616.c
 +
  MAX2175 SDR TUNER DRIVER
  M:    Ramesh Shanmugasundaram <[email protected]>
  L:    [email protected]
@@@ -15561,7 -15537,7 +15561,7 @@@ F:   include/media/imx.
  MEDIA DRIVERS FOR FREESCALE IMX7/8
  M:    Rui Miguel Silva <[email protected]>
  M:    Laurent Pinchart <[email protected]>
 -M:    Martin Kepplinger <[email protected]>
 +M:    Martin Kepplinger-Novakovic <[email protected]>
  R:    Purism Kernel Team <[email protected]>
  L:    [email protected]
  S:    Maintained
@@@ -16212,7 -16188,7 +16212,7 @@@ MEMORY CONTROLLER DRIVER
  M:    Krzysztof Kozlowski <[email protected]>
  L:    [email protected]
  S:    Maintained
 -B:    mailto:[email protected]
 +B:    mailto:[email protected]
  T:    git 
git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl.git
  F:    Documentation/devicetree/bindings/memory-controllers/
  F:    drivers/memory/
@@@ -16274,7 -16250,6 +16274,7 @@@ F:   include/linux/mmzone.
  F:    include/linux/mmdebug.h
  F:    include/linux/mmu_notifier.h
  F:    include/linux/pagewalk.h
 +F:    include/linux/pgalloc.h
  F:    include/linux/pgtable.h
  F:    include/linux/ptdump.h
  F:    include/linux/vmpressure.h
@@@ -17462,6 -17437,14 +17462,14 @@@ S: Maintaine
  F:    Documentation/devicetree/bindings/net/motorcomm,yt8xxx.yaml
  F:    drivers/net/phy/motorcomm.c
  
+ MOTORCOMM YT921X ETHERNET SWITCH DRIVER
+ M:    David Yang <[email protected]>
+ L:    [email protected]
+ S:    Maintained
+ F:    Documentation/devicetree/bindings/net/dsa/motorcomm,yt921x.yaml
+ F:    drivers/net/dsa/yt921x.*
+ F:    net/dsa/tag_yt921x.c
+ 
  MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
  M:    Jiri Slaby <[email protected]>
  S:    Maintained
@@@ -17489,13 -17472,6 +17497,13 @@@ S: Maintaine
  F:    Documentation/hwmon/mp2891.rst
  F:    drivers/hwmon/pmbus/mp2891.c
  
 +MPS MP2925 DRIVER
 +M:    Noah Wang <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/hwmon/mp2925.rst
 +F:    drivers/hwmon/pmbus/mp2925.c
 +
  MPS MP29502 DRIVER
  M:    Wensheng Wang <[email protected]>
  L:    [email protected]
@@@ -17517,13 -17493,6 +17525,13 @@@ S: Maintaine
  F:    Documentation/hwmon/mp9941.rst
  F:    drivers/hwmon/pmbus/mp9941.c
  
 +MPS MP9945 DRIVER
 +M:    Cosmo Chou <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/hwmon/mp9945.rst
 +F:    drivers/hwmon/pmbus/mp9945.c
 +
  MR800 AVERMEDIA USB FM RADIO DRIVER
  M:    Alexey Klimov <[email protected]>
  L:    [email protected]
@@@ -19022,9 -18991,10 +19030,9 @@@ T:  git git://linuxtv.org/media.gi
  F:    drivers/media/i2c/ov08d10.c
  
  OMNIVISION OV08X40 SENSOR DRIVER
 -M:    Jason Chen <[email protected]>
 +M:    Jimmy Su <[email protected]>
  L:    [email protected]
  S:    Maintained
 -T:    git git://linuxtv.org/media.git
  F:    drivers/media/i2c/ov08x40.c
  F:    Documentation/devicetree/bindings/media/i2c/ovti,ov08x40.yaml
  
@@@ -19117,7 -19087,7 +19125,7 @@@ F:   Documentation/devicetree/bindings/me
  F:    drivers/media/i2c/ov5675.c
  
  OMNIVISION OV5693 SENSOR DRIVER
 -M:    Daniel Scally <[email protected]>
 +M:    Daniel Scally <[email protected]>
  L:    [email protected]
  S:    Maintained
  T:    git git://linuxtv.org/media.git
@@@ -21198,7 -21168,7 +21206,7 @@@ F:   Documentation/devicetree/bindings/i2
  F:    drivers/i2c/busses/i2c-qcom-cci.c
  
  QUALCOMM INTERCONNECT BWMON DRIVER
 -M:    Krzysztof Kozlowski <[email protected]>
 +M:    Krzysztof Kozlowski <[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
@@@ -21356,7 -21326,6 +21364,7 @@@ F:   drivers/media/platform/qcom/venus
  QUALCOMM WCN36XX WIRELESS DRIVER
  M:    Loic Poulain <[email protected]>
  L:    [email protected]
 +L:    [email protected]
  S:    Supported
  W:    https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
  F:    drivers/net/wireless/ath/wcn36xx/
@@@ -22812,7 -22781,6 +22820,7 @@@ L:   [email protected]
  L:    [email protected]
  S:    Supported
  F:    Documentation/devicetree/bindings/firmware/google,gs101-acpm-ipc.yaml
 +F:    drivers/clk/samsung/clk-acpm.c
  F:    drivers/firmware/samsung/exynos-acpm*
  F:    include/linux/firmware/samsung/exynos-acpm-protocol.h
  
@@@ -24946,7 -24914,7 +24954,7 @@@ F:   drivers/net/pcs/pcs-xpcs.
  F:    include/linux/pcs/pcs-xpcs.h
  
  SYNOPSYS DESIGNWARE HDMI RX CONTROLLER DRIVER
 -M:    Shreeya Patel <[email protected]>
 +M:    Dmitry Osipenko <[email protected]>
  L:    [email protected]
  L:    [email protected]
  S:    Maintained
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 782bb48c9f3d7,81d3bdc098e63..cf8abbe018402
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@@ -9429,7 -9429,8 +9429,7 @@@ static int hclge_mii_ioctl(struct hclge
                /* this command reads phy id and register at the same time */
                fallthrough;
        case SIOCGMIIREG:
 -              data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
 -              return 0;
 +              return hclge_read_phy_reg(hdev, data->reg_num, &data->val_out);
  
        case SIOCSMIIREG:
                return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
@@@ -9444,15 -9445,8 +9444,8 @@@ static int hclge_do_ioctl(struct hnae3_
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
  
-       switch (cmd) {
-       case SIOCGHWTSTAMP:
-               return hclge_ptp_get_cfg(hdev, ifr);
-       case SIOCSHWTSTAMP:
-               return hclge_ptp_set_cfg(hdev, ifr);
-       default:
-               if (!hdev->hw.mac.phydev)
-                       return hclge_mii_ioctl(hdev, ifr, cmd);
-       }
+       if (!hdev->hw.mac.phydev)
+               return hclge_mii_ioctl(hdev, ifr, cmd);
  
        return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
  }
@@@ -12900,6 -12894,8 +12893,8 @@@ static const struct hnae3_ae_ops hclge_
        .get_dscp_prio = hclge_get_dscp_prio,
        .get_wol = hclge_get_wol,
        .set_wol = hclge_set_wol,
+       .hwtstamp_get = hclge_ptp_get_cfg,
+       .hwtstamp_set = hclge_ptp_set_cfg,
  };
  
  static struct hnae3_ae_algo ae_algo = {
diff --combined drivers/net/ethernet/intel/ice/ice_common.c
index 2532b6f82e971,b097cc8b175cb..6edeb06b4dce2
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@@ -1161,6 -1161,9 +1161,9 @@@ int ice_init_hw(struct ice_hw *hw
        status = ice_init_hw_tbls(hw);
        if (status)
                goto err_unroll_fltr_mgmt_struct;
+ 
+       ice_init_dev_hw(hw->back);
+ 
        mutex_init(&hw->tnl_lock);
        ice_init_chk_recipe_reuse_support(hw);
  
@@@ -4382,15 -4385,6 +4385,15 @@@ int ice_get_phy_lane_number(struct ice_
        unsigned int lane;
        int err;
  
 +      /* E82X does not have sequential IDs, lane number is PF ID.
 +       * For E825 device, the exception is the variant with external
 +       * PHY (0x579F), in which there is also 1:1 pf_id -> lane_number
 +       * mapping.
 +       */
 +      if (hw->mac_type == ICE_MAC_GENERIC ||
 +          hw->device_id == ICE_DEV_ID_E825C_SGMII)
 +              return hw->pf_id;
 +
        options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
        if (!options)
                return -ENOMEM;
@@@ -6505,28 -6499,6 +6508,28 @@@ u32 ice_get_link_speed(u16 index
        return ice_aq_to_link_speed[index];
  }
  
 +/**
 + * ice_get_dest_cgu - get destination CGU dev for given HW
 + * @hw: pointer to the HW struct
 + *
 + * Get CGU client id for CGU register read/write operations.
 + *
 + * Return: CGU device id to use in SBQ transactions.
 + */
 +static enum ice_sbq_dev_id ice_get_dest_cgu(struct ice_hw *hw)
 +{
 +      /* On dual complex E825 only complex 0 has functional CGU powering all
 +       * the PHYs.
 +       * SBQ destination device cgu points to CGU on a current complex and to
 +       * access primary CGU from the secondary complex, the driver should use
 +       * cgu_peer as a destination device.
 +       */
 +      if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) &&
 +          !ice_is_primary(hw))
 +              return ice_sbq_dev_cgu_peer;
 +      return ice_sbq_dev_cgu;
 +}
 +
  /**
   * ice_read_cgu_reg - Read a CGU register
   * @hw: Pointer to the HW struct
@@@ -6541,8 -6513,8 +6544,8 @@@
  int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
  {
        struct ice_sbq_msg_input cgu_msg = {
 +              .dest_dev = ice_get_dest_cgu(hw),
                .opcode = ice_sbq_msg_rd,
 -              .dest_dev = ice_sbq_dev_cgu,
                .msg_addr_low = addr
        };
        int err;
@@@ -6573,8 -6545,8 +6576,8 @@@
  int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val)
  {
        struct ice_sbq_msg_input cgu_msg = {
 +              .dest_dev = ice_get_dest_cgu(hw),
                .opcode = ice_sbq_msg_wr,
 -              .dest_dev = ice_sbq_dev_cgu,
                .msg_addr_low = addr,
                .data = val
        };
diff --combined drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 013c93b6605ed,06b8786ae3abd..c8cb492fddf46
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@@ -574,9 -574,7 +574,7 @@@ ice_destroy_tunnel_end
  int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
                            unsigned int idx, struct udp_tunnel_info *ti)
  {
-       struct ice_netdev_priv *np = netdev_priv(netdev);
-       struct ice_vsi *vsi = np->vsi;
-       struct ice_pf *pf = vsi->back;
+       struct ice_pf *pf = ice_netdev_to_pf(netdev);
        enum ice_tunnel_type tnl_type;
        int status;
        u16 index;
@@@ -598,9 -596,7 +596,7 @@@
  int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
                              unsigned int idx, struct udp_tunnel_info *ti)
  {
-       struct ice_netdev_priv *np = netdev_priv(netdev);
-       struct ice_vsi *vsi = np->vsi;
-       struct ice_pf *pf = vsi->back;
+       struct ice_pf *pf = ice_netdev_to_pf(netdev);
        enum ice_tunnel_type tnl_type;
        int status;
  
@@@ -1479,7 -1475,7 +1475,7 @@@ static void ice_init_prof_masks(struct 
        per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
  
        hw->blk[blk].masks.count = per_pf;
 -      hw->blk[blk].masks.first = hw->pf_id * per_pf;
 +      hw->blk[blk].masks.first = hw->logical_pf_id * per_pf;
  
        memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
  
diff --combined drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 44a142a041b2f,9735a75732cf5..4092ea29c6308
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@@ -1978,6 -1978,7 +1978,6 @@@ static void esw_destroy_offloads_fdb_ta
        /* Holds true only as long as DMFS is the default */
        mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
                                     MLX5_FLOW_STEERING_MODE_DMFS);
 -      atomic64_set(&esw->user_count, 0);
  }
  
  static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
@@@ -3556,10 -3557,11 +3556,11 @@@ bool mlx5_esw_offloads_controller_valid
  
  int esw_offloads_enable(struct mlx5_eswitch *esw)
  {
+       u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
        struct mapping_ctx *reg_c0_obj_pool;
        struct mlx5_vport *vport;
        unsigned long i;
-       u64 mapping_id;
+       u8 id_len;
        int err;
  
        mutex_init(&esw->offloads.termtbl_mutex);
@@@ -3581,9 -3583,10 +3582,10 @@@
        if (err)
                goto err_vport_metadata;
  
-       mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
+       mlx5_query_nic_sw_system_image_guid(esw->dev, mapping_id, &id_len);
  
-       reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
+       reg_c0_obj_pool = mapping_create_for_id(mapping_id, id_len,
+                                               MAPPING_TYPE_CHAIN,
                                                sizeof(struct mlx5_mapped_obj),
                                                
ESW_REG_C0_USER_DATA_METADATA_MASK,
                                                true);
diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7b90ecd3a55e6,ba4eeba14baaa..1e69c1a7dc6c5
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@@ -40,12 -40,14 +40,14 @@@
  #include <linux/phylink.h>
  #include <linux/udp.h>
  #include <linux/bpf_trace.h>
+ #include <net/devlink.h>
  #include <net/page_pool/helpers.h>
  #include <net/pkt_cls.h>
  #include <net/xdp_sock_drv.h>
  #include "stmmac_ptp.h"
  #include "stmmac_fpe.h"
  #include "stmmac.h"
+ #include "stmmac_pcs.h"
  #include "stmmac_xdp.h"
  #include <linux/reset.h>
  #include <linux/of_mdio.h>
@@@ -57,8 -59,7 +59,7 @@@
   * with fine resolution and binary rollover. This avoid non-monotonic behavior
   * (clock jumps) when changing timestamping settings at runtime.
   */
- #define STMMAC_HWTS_ACTIVE    (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
-                                PTP_TCR_TSCTRLSSR)
+ #define STMMAC_HWTS_ACTIVE    (PTP_TCR_TSENA | PTP_TCR_TSCTRLSSR)
  
  #define       STMMAC_ALIGN(x)         ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
  #define       TSO_MAX_BUFF_SIZE       (SZ_16K - 1)
@@@ -147,6 -148,15 +148,15 @@@ static void stmmac_exit_fs(struct net_d
  
  #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
  
+ struct stmmac_devlink_priv {
+       struct stmmac_priv *stmmac_priv;
+ };
+ 
+ enum stmmac_dl_param_id {
+       STMMAC_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+       STMMAC_DEVLINK_PARAM_ID_TS_COARSE,
+ };
+ 
  /**
   * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
   * @bsp_priv: BSP private data structure (unused)
@@@ -445,7 -455,7 +455,7 @@@ static void stmmac_get_rx_hwtstamp(stru
        if (!priv->hwts_rx_en)
                return;
        /* For GMAC4, the valid timestamp is from CTX next desc. */
-       if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+       if (dwmac_is_xmac(priv->plat->core_type))
                desc = np;
  
        /* Check if timestamp is available */
@@@ -463,6 -473,33 +473,33 @@@
        }
  }
  
+ static void stmmac_update_subsecond_increment(struct stmmac_priv *priv)
+ {
+       bool xmac = dwmac_is_xmac(priv->plat->core_type);
+       u32 sec_inc = 0;
+       u64 temp = 0;
+ 
+       stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
+ 
+       /* program Sub Second Increment reg */
+       stmmac_config_sub_second_increment(priv, priv->ptpaddr,
+                                          priv->plat->clk_ptp_rate,
+                                          xmac, &sec_inc);
+       temp = div_u64(1000000000ULL, sec_inc);
+ 
+       /* Store sub second increment for later use */
+       priv->sub_second_inc = sec_inc;
+ 
+       /* calculate default added value:
+        * formula is :
+        * addend = (2^32)/freq_div_ratio;
+        * where, freq_div_ratio = 1e9ns/sec_inc
+        */
+       temp = (u64)(temp << 32);
+       priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
+       stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+ }
+ 
  /**
   *  stmmac_hwtstamp_set - control hardware timestamping.
   *  @dev: device pointer.
@@@ -647,6 -684,8 +684,8 @@@ static int stmmac_hwtstamp_set(struct n
        priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
  
        priv->systime_flags = STMMAC_HWTS_ACTIVE;
+       if (!priv->tsfupdt_coarse)
+               priv->systime_flags |= PTP_TCR_TSCFUPDT;
  
        if (priv->hwts_tx_en || priv->hwts_rx_en) {
                priv->systime_flags |= tstamp_all | ptp_v2 |
@@@ -696,10 -735,7 +735,7 @@@ static int stmmac_hwtstamp_get(struct n
  static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
                                      u32 systime_flags)
  {
-       bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
        struct timespec64 now;
-       u32 sec_inc = 0;
-       u64 temp = 0;
  
        if (!priv->plat->clk_ptp_rate) {
                netdev_err(priv->dev, "Invalid PTP clock rate");
@@@ -709,23 -745,7 +745,7 @@@
        stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
        priv->systime_flags = systime_flags;
  
-       /* program Sub Second Increment reg */
-       stmmac_config_sub_second_increment(priv, priv->ptpaddr,
-                                          priv->plat->clk_ptp_rate,
-                                          xmac, &sec_inc);
-       temp = div_u64(1000000000ULL, sec_inc);
- 
-       /* Store sub second increment for later use */
-       priv->sub_second_inc = sec_inc;
- 
-       /* calculate default added value:
-        * formula is :
-        * addend = (2^32)/freq_div_ratio;
-        * where, freq_div_ratio = 1e9ns/sec_inc
-        */
-       temp = (u64)(temp << 32);
-       priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
-       stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+       stmmac_update_subsecond_increment(priv);
  
        /* initialize system time */
        ktime_get_real_ts64(&now);
@@@ -745,7 -765,7 +765,7 @@@
   */
  static int stmmac_init_timestamping(struct stmmac_priv *priv)
  {
-       bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+       bool xmac = dwmac_is_xmac(priv->plat->core_type);
        int ret;
  
        if (priv->plat->ptp_clk_freq_config)
@@@ -756,7 -776,8 +776,8 @@@
                return -EOPNOTSUPP;
        }
  
-       ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
+       ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE |
+                                              PTP_TCR_TSCFUPDT);
        if (ret) {
                netdev_warn(priv->dev, "PTP init failed\n");
                return ret;
@@@ -850,6 -871,13 +871,13 @@@ static struct phylink_pcs *stmmac_mac_s
                        return pcs;
        }
  
+       /* The PCS control register is only relevant for SGMII, TBI and RTBI
+        * modes. We no longer support TBI or RTBI, so only configure this
+        * register when operating in SGMII mode with the integrated PCS.
+        */
+       if (priv->hw->pcs & STMMAC_PCS_SGMII && priv->integrated_pcs)
+               return &priv->integrated_pcs->pcs;
+ 
        return NULL;
  }
  
@@@ -859,6 -887,18 +887,18 @@@ static void stmmac_mac_config(struct ph
        /* Nothing to do, xpcs_config() handles everything */
  }
  
+ static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
+                            phy_interface_t interface)
+ {
+       struct net_device *ndev = to_net_dev(config->dev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+ 
+       if (priv->plat->mac_finish)
+               priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, 
interface);
+ 
+       return 0;
+ }
+ 
  static void stmmac_mac_link_down(struct phylink_config *config,
                                 unsigned int mode, phy_interface_t interface)
  {
@@@ -1053,14 -1093,16 +1093,16 @@@ static int stmmac_mac_enable_tx_lpi(str
        return 0;
  }
  
- static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
-                            phy_interface_t interface)
+ static int stmmac_mac_wol_set(struct phylink_config *config, u32 wolopts,
+                             const u8 *sopass)
  {
-       struct net_device *ndev = to_net_dev(config->dev);
-       struct stmmac_priv *priv = netdev_priv(ndev);
+       struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
  
-       if (priv->plat->mac_finish)
-               priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, 
interface);
+       device_set_wakeup_enable(priv->device, !!wolopts);
+ 
+       mutex_lock(&priv->lock);
+       priv->wolopts = wolopts;
+       mutex_unlock(&priv->lock);
  
        return 0;
  }
@@@ -1069,11 -1111,12 +1111,12 @@@ static const struct phylink_mac_ops stm
        .mac_get_caps = stmmac_mac_get_caps,
        .mac_select_pcs = stmmac_mac_select_pcs,
        .mac_config = stmmac_mac_config,
+       .mac_finish = stmmac_mac_finish,
        .mac_link_down = stmmac_mac_link_down,
        .mac_link_up = stmmac_mac_link_up,
        .mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
        .mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
-       .mac_finish = stmmac_mac_finish,
+       .mac_wol_set = stmmac_mac_wol_set,
  };
  
  /**
@@@ -1086,17 -1129,25 +1129,25 @@@
  static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
  {
        int interface = priv->plat->phy_interface;
+       int speed = priv->plat->mac_port_sel_speed;
+ 
+       if (priv->dma_cap.pcs && interface == PHY_INTERFACE_MODE_SGMII) {
+               netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
+               priv->hw->pcs = STMMAC_PCS_SGMII;
  
-       if (priv->dma_cap.pcs) {
-               if ((interface == PHY_INTERFACE_MODE_RGMII) ||
-                   (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
-                   (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
-                   (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
-                       netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
-                       priv->hw->pcs = STMMAC_PCS_RGMII;
-               } else if (interface == PHY_INTERFACE_MODE_SGMII) {
-                       netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
-                       priv->hw->pcs = STMMAC_PCS_SGMII;
+               switch (speed) {
+               case SPEED_10:
+               case SPEED_100:
+               case SPEED_1000:
+                       priv->hw->reverse_sgmii_enable = true;
+                       break;
+ 
+               default:
+                       dev_warn(priv->device, "invalid port speed\n");
+                       fallthrough;
+               case 0:
+                       priv->hw->reverse_sgmii_enable = false;
+                       break;
                }
        }
  }
@@@ -1174,18 -1225,10 +1225,10 @@@ static int stmmac_init_phy(struct net_d
                phylink_ethtool_set_eee(priv->phylink, &eee);
        }
  
-       if (!priv->plat->pmt) {
-               struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
- 
-               phylink_ethtool_get_wol(priv->phylink, &wol);
-               device_set_wakeup_capable(priv->device, !!wol.supported);
-               device_set_wakeup_enable(priv->device, !!wol.wolopts);
-       }
- 
        return 0;
  }
  
- static int stmmac_phy_setup(struct stmmac_priv *priv)
+ static int stmmac_phylink_setup(struct stmmac_priv *priv)
  {
        struct stmmac_mdio_bus_data *mdio_bus_data;
        struct phylink_config *config;
@@@ -1250,6 -1293,16 +1293,16 @@@
                config->eee_enabled_default = true;
        }
  
+       config->wol_phy_speed_ctrl = true;
+       if (priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL) {
+               config->wol_phy_legacy = true;
+       } else {
+               if (priv->dma_cap.pmt_remote_wake_up)
+                       config->wol_mac_support |= WAKE_UCAST;
+               if (priv->dma_cap.pmt_magic_frame)
+                       config->wol_mac_support |= WAKE_MAGIC;
+       }
+ 
        fwnode = priv->plat->port_node;
        if (!fwnode)
                fwnode = dev_fwnode(priv->device);
@@@ -2397,7 -2450,7 +2450,7 @@@ static void stmmac_dma_operation_mode(s
                txfifosz = priv->dma_cap.tx_fifo_size;
  
        /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
-       if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
+       if (dwmac_is_xmac(priv->plat->core_type)) {
                rxfifosz /= rx_channels_count;
                txfifosz /= tx_channels_count;
        }
@@@ -3443,19 -3496,6 +3496,6 @@@ static int stmmac_hw_setup(struct net_d
        stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
        phylink_rx_clk_stop_unblock(priv->phylink);
  
-       /* PS and related bits will be programmed according to the speed */
-       if (priv->hw->pcs) {
-               int speed = priv->plat->mac_port_sel_speed;
- 
-               if ((speed == SPEED_10) || (speed == SPEED_100) ||
-                   (speed == SPEED_1000)) {
-                       priv->hw->ps = speed;
-               } else {
-                       dev_warn(priv->device, "invalid port speed\n");
-                       priv->hw->ps = 0;
-               }
-       }
- 
        /* Initialize the MAC Core */
        stmmac_core_init(priv, priv->hw, dev);
  
@@@ -3492,9 -3532,6 +3532,6 @@@
                }
        }
  
-       if (priv->hw->pcs)
-               stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
- 
        /* set TX and RX rings length */
        stmmac_set_rings_length(priv);
  
@@@ -3963,8 -4000,6 +4000,6 @@@ static int __stmmac_open(struct net_dev
        stmmac_init_coalesce(priv);
  
        phylink_start(priv->phylink);
-       /* We may have called phylink_speed_down before */
-       phylink_speed_up(priv->phylink);
  
        ret = stmmac_request_irq(dev);
        if (ret)
@@@ -4015,6 -4050,9 +4050,9 @@@ static int stmmac_open(struct net_devic
  
        kfree(dma_conf);
  
+       /* We may have called phylink_speed_down before */
+       phylink_speed_up(priv->phylink);
+ 
        return ret;
  
  err_disconnect_phy:
@@@ -4032,13 -4070,6 +4070,6 @@@ static void __stmmac_release(struct net
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 chan;
  
-       /* If the PHY or MAC has WoL enabled, then the PHY will not be
-        * suspended when phylink_stop() is called below. Set the PHY
-        * to its slowest speed to save power.
-        */
-       if (device_may_wakeup(priv->device))
-               phylink_speed_down(priv->phylink, false);
- 
        /* Stop and disconnect the PHY */
        phylink_stop(priv->phylink);
  
@@@ -4078,6 -4109,13 +4109,13 @@@ static int stmmac_release(struct net_de
  {
        struct stmmac_priv *priv = netdev_priv(dev);
  
+       /* If the PHY or MAC has WoL enabled, then the PHY will not be
+        * suspended when phylink_stop() is called below. Set the PHY
+        * to its slowest speed to save power.
+        */
+       if (device_may_wakeup(priv->device))
+               phylink_speed_down(priv->phylink, false);
+ 
        __stmmac_release(dev);
  
        phylink_disconnect_phy(priv->phylink);
@@@ -4089,11 -4127,18 +4127,11 @@@
  static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
                               struct stmmac_tx_queue *tx_q)
  {
 -      u16 tag = 0x0, inner_tag = 0x0;
 -      u32 inner_type = 0x0;
        struct dma_desc *p;
 +      u16 tag = 0x0;
  
 -      if (!priv->dma_cap.vlins)
 +      if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
                return false;
 -      if (!skb_vlan_tag_present(skb))
 -              return false;
 -      if (skb->vlan_proto == htons(ETH_P_8021AD)) {
 -              inner_tag = skb_vlan_tag_get(skb);
 -              inner_type = STMMAC_VLAN_INSERT;
 -      }
  
        tag = skb_vlan_tag_get(skb);
  
@@@ -4102,7 -4147,7 +4140,7 @@@
        else
                p = &tx_q->dma_tx[tx_q->cur_tx];
  
 -      if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
 +      if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
                return false;
  
        stmmac_set_tx_owner(priv, p);
@@@ -4500,7 -4545,6 +4538,7 @@@ static netdev_tx_t stmmac_xmit(struct s
        bool has_vlan, set_ic;
        int entry, first_tx;
        dma_addr_t des;
 +      u32 sdu_len;
  
        tx_q = &priv->dma_conf.tx_queue[queue];
        txq_stats = &priv->xstats.txq_stats[queue];
@@@ -4513,20 -4557,16 +4551,21 @@@
        if (skb_is_gso(skb) && priv->tso) {
                if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
                        return stmmac_tso_xmit(skb, dev);
-               if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
+               if (priv->plat->core_type == DWMAC_CORE_GMAC4 &&
+                   (gso & SKB_GSO_UDP_L4))
                        return stmmac_tso_xmit(skb, dev);
        }
  
        if (priv->est && priv->est->enable &&
 -          priv->est->max_sdu[queue] &&
 -          skb->len > priv->est->max_sdu[queue]){
 -              priv->xstats.max_sdu_txq_drop[queue]++;
 -              goto max_sdu_err;
 +          priv->est->max_sdu[queue]) {
 +              sdu_len = skb->len;
 +              /* Add VLAN tag length if VLAN tag insertion offload is requested */
 +              if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
 +                      sdu_len += VLAN_HLEN;
 +              if (sdu_len > priv->est->max_sdu[queue]) {
 +                      priv->xstats.max_sdu_txq_drop[queue]++;
 +                      goto max_sdu_err;
 +              }
        }
  
        if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@@ -5971,7 -6011,7 +6010,7 @@@ static void stmmac_common_interrupt(str
        u32 queue;
        bool xmac;
  
-       xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+       xmac = dwmac_is_xmac(priv->plat->core_type);
        queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
  
        if (priv->irq_wake)
@@@ -5985,7 -6025,7 +6024,7 @@@
                stmmac_fpe_irq_status(priv);
  
        /* To handle GMAC own interrupts */
-       if ((priv->plat->has_gmac) || xmac) {
+       if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) {
                int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
  
                if (unlikely(status)) {
@@@ -5999,15 -6039,6 +6038,6 @@@
                for (queue = 0; queue < queues_count; queue++)
                        stmmac_host_mtl_irq_status(priv, priv->hw, queue);
  
-               /* PCS link status */
-               if (priv->hw->pcs &&
-                   !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
-                       if (priv->xstats.pcs_link)
-                               netif_carrier_on(priv->dev);
-                       else
-                               netif_carrier_off(priv->dev);
-               }
- 
                stmmac_timestamp_interrupt(priv, priv);
        }
  }
@@@ -6355,7 -6386,7 +6385,7 @@@ static int stmmac_dma_cap_show(struct s
                   (priv->dma_cap.mbps_1000) ? "Y" : "N");
        seq_printf(seq, "\tHalf duplex: %s\n",
                   (priv->dma_cap.half_duplex) ? "Y" : "N");
-       if (priv->plat->has_xgmac) {
+       if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
                seq_printf(seq,
                           "\tNumber of Additional MAC address registers: %d\n",
                           priv->dma_cap.multi_addr);
@@@ -6379,7 -6410,7 +6409,7 @@@
                   (priv->dma_cap.time_stamp) ? "Y" : "N");
        seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
                   (priv->dma_cap.atime_stamp) ? "Y" : "N");
-       if (priv->plat->has_xgmac)
+       if (priv->plat->core_type == DWMAC_CORE_XGMAC)
                seq_printf(seq, "\tTimestamp System Time Source: %s\n",
                           dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
        seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
@@@ -6388,7 -6419,7 +6418,7 @@@
        seq_printf(seq, "\tChecksum Offload in TX: %s\n",
                   (priv->dma_cap.tx_coe) ? "Y" : "N");
        if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
-           priv->plat->has_xgmac) {
+           priv->plat->core_type == DWMAC_CORE_XGMAC) {
                seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
                           (priv->dma_cap.rx_coe) ? "Y" : "N");
        } else {
@@@ -7240,13 -7271,21 +7270,21 @@@ static int stmmac_hw_init(struct stmmac
         * has to be disable and this can be done by passing the
         * riwt_off field from the platform.
         */
-       if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
-           (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
+       if ((priv->synopsys_id >= DWMAC_CORE_3_50 ||
+            priv->plat->core_type == DWMAC_CORE_XGMAC) &&
+           !priv->plat->riwt_off) {
                priv->use_riwt = 1;
                dev_info(priv->device,
                         "Enable RX Mitigation via HW Watchdog Timer\n");
        }
  
+       /* Unimplemented PCS init (as indicated by stmmac_do_callback()
+        * perversely returning -EINVAL) is non-fatal.
+        */
+       ret = stmmac_mac_pcs_init(priv);
+       if (ret != -EINVAL)
+               return ret;
+ 
        return 0;
  }
  
@@@ -7355,7 -7394,7 +7393,7 @@@ static int stmmac_xdp_rx_timestamp(cons
                return -ENODATA;
  
        /* For GMAC4, the valid timestamp is from CTX next desc. */
-       if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+       if (dwmac_is_xmac(priv->plat->core_type))
                desc_contains_ts = ndesc;
  
        /* Check if timestamp is available */
@@@ -7373,6 -7412,95 +7411,95 @@@ static const struct xdp_metadata_ops st
        .xmo_rx_timestamp               = stmmac_xdp_rx_timestamp,
  };
  
+ static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id,
+                                  struct devlink_param_gset_ctx *ctx,
+                                  struct netlink_ext_ack *extack)
+ {
+       struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
+       struct stmmac_priv *priv = dl_priv->stmmac_priv;
+ 
+       priv->tsfupdt_coarse = ctx->val.vbool;
+ 
+       if (priv->tsfupdt_coarse)
+               priv->systime_flags &= ~PTP_TCR_TSCFUPDT;
+       else
+               priv->systime_flags |= PTP_TCR_TSCFUPDT;
+ 
+       /* In Coarse mode, we can use a smaller subsecond increment, let's
+        * reconfigure the systime, subsecond increment and addend.
+        */
+       stmmac_update_subsecond_increment(priv);
+ 
+       return 0;
+ }
+ 
+ static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id,
+                                  struct devlink_param_gset_ctx *ctx)
+ {
+       struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
+       struct stmmac_priv *priv = dl_priv->stmmac_priv;
+ 
+       ctx->val.vbool = priv->tsfupdt_coarse;
+ 
+       return 0;
+ }
+ 
+ static const struct devlink_param stmmac_devlink_params[] = {
+       DEVLINK_PARAM_DRIVER(STMMAC_DEVLINK_PARAM_ID_TS_COARSE, "ts_coarse",
+                            DEVLINK_PARAM_TYPE_BOOL,
+                            BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+                            stmmac_dl_ts_coarse_get,
+                            stmmac_dl_ts_coarse_set, NULL),
+ };
+ 
+ /* None of the generic devlink parameters are implemented */
+ static const struct devlink_ops stmmac_devlink_ops = {};
+ 
+ static int stmmac_register_devlink(struct stmmac_priv *priv)
+ {
+       struct stmmac_devlink_priv *dl_priv;
+       int ret;
+ 
+       /* For now, what is exposed over devlink is only relevant when
+        * timestamping is available and we have a valid ptp clock rate
+        */
+       if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) ||
+           !priv->plat->clk_ptp_rate)
+               return 0;
+ 
+       priv->devlink = devlink_alloc(&stmmac_devlink_ops, sizeof(*dl_priv),
+                                     priv->device);
+       if (!priv->devlink)
+               return -ENOMEM;
+ 
+       dl_priv = devlink_priv(priv->devlink);
+       dl_priv->stmmac_priv = priv;
+ 
+       ret = devlink_params_register(priv->devlink, stmmac_devlink_params,
+                                     ARRAY_SIZE(stmmac_devlink_params));
+       if (ret)
+               goto dl_free;
+ 
+       devlink_register(priv->devlink);
+       return 0;
+ 
+ dl_free:
+       devlink_free(priv->devlink);
+ 
+       return ret;
+ }
+ 
+ static void stmmac_unregister_devlink(struct stmmac_priv *priv)
+ {
+       if (!priv->devlink)
+               return;
+ 
+       devlink_unregister(priv->devlink);
+       devlink_params_unregister(priv->devlink, stmmac_devlink_params,
+                                 ARRAY_SIZE(stmmac_devlink_params));
+       devlink_free(priv->devlink);
+ }
+ 
  /**
   * stmmac_dvr_probe
   * @device: device pointer
@@@ -7511,7 -7639,7 +7638,7 @@@ int stmmac_dvr_probe(struct device *dev
  
        if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
                ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
-               if (priv->plat->has_gmac4)
+               if (priv->plat->core_type == DWMAC_CORE_GMAC4)
                        ndev->hw_features |= NETIF_F_GSO_UDP_L4;
                priv->tso = true;
                dev_info(priv->device, "TSO feature enabled\n");
@@@ -7564,7 -7692,7 +7691,7 @@@
  #ifdef STMMAC_VLAN_TAG_USED
        /* Both mac100 and gmac support receive VLAN tag detection */
        ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
-       if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
+       if (dwmac_is_xmac(priv->plat->core_type)) {
                ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
                priv->hw->hw_vlan_en = true;
        }
@@@ -7572,8 -7700,11 +7699,8 @@@
                ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
                ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
        }
 -      if (priv->dma_cap.vlins) {
 +      if (priv->dma_cap.vlins)
                ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
 -              if (priv->dma_cap.dvlan)
 -                      ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
 -      }
  #endif
        priv->msg_enable = netif_msg_init(debug, default_msg_level);
  
@@@ -7592,7 -7723,7 +7719,7 @@@
  
        /* MTU range: 46 - hw-specific max */
        ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
-       if (priv->plat->has_xgmac)
+       if (priv->plat->core_type == DWMAC_CORE_XGMAC)
                ndev->max_mtu = XGMAC_JUMBO_LEN;
        else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
                ndev->max_mtu = JUMBO_LEN;
@@@ -7637,12 -7768,16 +7764,16 @@@
        if (ret)
                goto error_pcs_setup;
  
-       ret = stmmac_phy_setup(priv);
+       ret = stmmac_phylink_setup(priv);
        if (ret) {
                netdev_err(ndev, "failed to setup phy (%d)\n", ret);
                goto error_phy_setup;
        }
  
+       ret = stmmac_register_devlink(priv);
+       if (ret)
+               goto error_devlink_setup;
+ 
        ret = register_netdev(ndev);
        if (ret) {
                dev_err(priv->device, "%s: ERROR %i registering the device\n",
@@@ -7665,6 -7800,8 +7796,8 @@@
        return ret;
  
  error_netdev_register:
+       stmmac_unregister_devlink(priv);
+ error_devlink_setup:
        phylink_destroy(priv->phylink);
  error_phy_setup:
        stmmac_pcs_clean(ndev);
@@@ -7701,6 -7838,8 +7834,8 @@@ void stmmac_dvr_remove(struct device *d
  #ifdef CONFIG_DEBUG_FS
        stmmac_exit_fs(ndev);
  #endif
+       stmmac_unregister_devlink(priv);
+ 
        phylink_destroy(priv->phylink);
        if (priv->plat->stmmac_rst)
                reset_control_assert(priv->plat->stmmac_rst);
@@@ -7755,7 -7894,7 +7890,7 @@@ int stmmac_suspend(struct device *dev
                priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
  
        /* Enable Power down mode by programming the PMT regs */
-       if (stmmac_wol_enabled_mac(priv)) {
+       if (priv->wolopts) {
                stmmac_pmt(priv, priv->hw, priv->wolopts);
                priv->irq_wake = 1;
        } else {
@@@ -7766,10 -7905,7 +7901,7 @@@
        mutex_unlock(&priv->lock);
  
        rtnl_lock();
-       if (stmmac_wol_enabled_phy(priv))
-               phylink_speed_down(priv->phylink, false);
- 
-       phylink_suspend(priv->phylink, stmmac_wol_enabled_mac(priv));
+       phylink_suspend(priv->phylink, !!priv->wolopts);
        rtnl_unlock();
  
        if (stmmac_fpe_supported(priv))
@@@ -7845,7 -7981,7 +7977,7 @@@ int stmmac_resume(struct device *dev
         * this bit because it can generate problems while resuming
         * from another devices (e.g. serial console).
         */
-       if (stmmac_wol_enabled_mac(priv)) {
+       if (priv->wolopts) {
                mutex_lock(&priv->lock);
                stmmac_pmt(priv, priv->hw, 0);
                mutex_unlock(&priv->lock);
@@@ -7907,9 -8043,6 +8039,6 @@@
         * workqueue thread, which will race with initialisation.
         */
        phylink_resume(priv->phylink);
-       if (stmmac_wol_enabled_phy(priv))
-               phylink_speed_up(priv->phylink);
- 
        rtnl_unlock();
  
        netif_device_attach(ndev);
diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 3b4d4696afe96,ef65cf511f3e2..d786527185999
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@@ -262,10 -262,10 +262,10 @@@ static int tc_init(struct stmmac_priv *
        unsigned int count;
        int ret, i;
  
-       if (dma_cap->l3l4fnum) {
-               priv->flow_entries_max = dma_cap->l3l4fnum;
+       priv->flow_entries_max = dma_cap->l3l4fnum;
+       if (priv->flow_entries_max) {
                priv->flow_entries = devm_kcalloc(priv->device,
-                                                 dma_cap->l3l4fnum,
+                                                 priv->flow_entries_max,
                                                  sizeof(*priv->flow_entries),
                                                  GFP_KERNEL);
                if (!priv->flow_entries)
@@@ -981,7 -981,7 +981,7 @@@ static int tc_taprio_configure(struct s
        if (qopt->cmd == TAPRIO_CMD_DESTROY)
                goto disable;
  
 -      if (qopt->num_entries >= dep)
 +      if (qopt->num_entries > dep)
                return -EINVAL;
        if (!qopt->cycle_time)
                return -ERANGE;
@@@ -1012,7 -1012,7 +1012,7 @@@
                s64 delta_ns = qopt->entries[i].interval;
                u32 gates = qopt->entries[i].gate_mask;
  
 -              if (delta_ns > GENMASK(wid, 0))
 +              if (delta_ns > GENMASK(wid - 1, 0))
                        return -ERANGE;
                if (gates > GENMASK(31 - wid, 0))
                        return -ERANGE;
diff --combined drivers/net/usb/usbnet.c
index 697cd9d866d3d,62a85dbad31a5..f3087fb62f4f8
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@@ -189,7 -189,7 +189,7 @@@ static bool usbnet_needs_usb_name_forma
                 is_local_ether_addr(net->dev_addr));
  }
  
- static void intr_complete (struct urb *urb)
+ static void intr_complete(struct urb *urb)
  {
        struct usbnet   *dev = urb->context;
        int             status = urb->status;
@@@ -221,7 -221,7 +221,7 @@@
                          "intr resubmit --> %d\n", status);
  }
  
- static int init_status (struct usbnet *dev, struct usb_interface *intf)
+ static int init_status(struct usbnet *dev, struct usb_interface *intf)
  {
        char            *buf = NULL;
        unsigned        pipe = 0;
@@@ -326,7 -326,7 +326,7 @@@ static void __usbnet_status_stop_force(
   * Some link protocols batch packets, so their rx_fixup paths
   * can return clones as well as just modify the original skb.
   */
- void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
+ void usbnet_skb_return(struct usbnet *dev, struct sk_buff *skb)
  {
        struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats);
        unsigned long flags;
@@@ -396,7 -396,7 +396,7 @@@ EXPORT_SYMBOL_GPL(usbnet_update_max_qle
   *
   *-------------------------------------------------------------------------*/
  
- int usbnet_change_mtu (struct net_device *net, int new_mtu)
+ int usbnet_change_mtu(struct net_device *net, int new_mtu)
  {
        struct usbnet   *dev = netdev_priv(net);
        int             ll_mtu = new_mtu + net->hard_header_len;
@@@ -472,7 -472,7 +472,7 @@@ static enum skb_state defer_bh(struct u
   * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
   * but tasklet_schedule() doesn't.  hope the failure is rare.
   */
- void usbnet_defer_kevent (struct usbnet *dev, int work)
+ void usbnet_defer_kevent(struct usbnet *dev, int work)
  {
        set_bit (work, &dev->flags);
        if (!usbnet_going_away(dev)) {
@@@ -489,9 -489,9 +489,9 @@@ EXPORT_SYMBOL_GPL(usbnet_defer_kevent)
  
  /*-------------------------------------------------------------------------*/
  
- static void rx_complete (struct urb *urb);
+ static void rx_complete(struct urb *urb);
  
- static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+ static int rx_submit(struct usbnet *dev, struct urb *urb, gfp_t flags)
  {
        struct sk_buff          *skb;
        struct skb_data         *entry;
@@@ -597,7 -597,7 +597,7 @@@ static inline int rx_process(struct usb
  
  /*-------------------------------------------------------------------------*/
  
- static void rx_complete (struct urb *urb)
+ static void rx_complete(struct urb *urb)
  {
        struct sk_buff          *skb = (struct sk_buff *) urb->context;
        struct skb_data         *entry = (struct skb_data *) skb->cb;
@@@ -728,7 -728,7 +728,7 @@@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_r
  
  // unlink pending rx/tx; completion handlers do all other cleanup
  
- static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
+ static int unlink_urbs(struct usbnet *dev, struct sk_buff_head *q)
  {
        unsigned long           flags;
        struct sk_buff          *skb;
@@@ -823,7 -823,7 +823,7 @@@ static void usbnet_terminate_urbs(struc
        remove_wait_queue(&dev->wait, &wait);
  }
  
- int usbnet_stop (struct net_device *net)
+ int usbnet_stop(struct net_device *net)
  {
        struct usbnet           *dev = netdev_priv(net);
        const struct driver_info *info = dev->driver_info;
@@@ -892,7 -892,7 +892,7 @@@ EXPORT_SYMBOL_GPL(usbnet_stop)
  
  // precondition: never called in_interrupt
  
- int usbnet_open (struct net_device *net)
+ int usbnet_open(struct net_device *net)
  {
        struct usbnet           *dev = netdev_priv(net);
        int                     retval;
@@@ -1048,7 -1048,7 +1048,7 @@@ int usbnet_set_link_ksettings_mii(struc
  }
  EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings_mii);
  
- u32 usbnet_get_link (struct net_device *net)
+ u32 usbnet_get_link(struct net_device *net)
  {
        struct usbnet *dev = netdev_priv(net);
  
@@@ -1076,7 -1076,7 +1076,7 @@@ int usbnet_nway_reset(struct net_devic
  }
  EXPORT_SYMBOL_GPL(usbnet_nway_reset);
  
- void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
+ void usbnet_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
  {
        struct usbnet *dev = netdev_priv(net);
  
@@@ -1087,7 -1087,7 +1087,7 @@@
  }
  EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
  
- u32 usbnet_get_msglevel (struct net_device *net)
+ u32 usbnet_get_msglevel(struct net_device *net)
  {
        struct usbnet *dev = netdev_priv(net);
  
@@@ -1095,7 -1095,7 +1095,7 @@@
  }
  EXPORT_SYMBOL_GPL(usbnet_get_msglevel);
  
- void usbnet_set_msglevel (struct net_device *net, u32 level)
+ void usbnet_set_msglevel(struct net_device *net, u32 level)
  {
        struct usbnet *dev = netdev_priv(net);
  
@@@ -1166,7 -1166,7 +1166,7 @@@ static void __handle_set_rx_mode(struc
   * especially now that control transfers can be queued.
   */
  static void
- usbnet_deferred_kevent (struct work_struct *work)
+ usbnet_deferred_kevent(struct work_struct *work)
  {
        struct usbnet           *dev =
                container_of(work, struct usbnet, kevent);
@@@ -1277,7 -1277,7 +1277,7 @@@ skip_reset
  
  /*-------------------------------------------------------------------------*/
  
- static void tx_complete (struct urb *urb)
+ static void tx_complete(struct urb *urb)
  {
        struct sk_buff          *skb = (struct sk_buff *) urb->context;
        struct skb_data         *entry = (struct skb_data *) skb->cb;
@@@ -1332,7 -1332,7 +1332,7 @@@
  
  /*-------------------------------------------------------------------------*/
  
- void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
+ void usbnet_tx_timeout(struct net_device *net, unsigned int txqueue)
  {
        struct usbnet           *dev = netdev_priv(net);
  
@@@ -1382,8 -1382,7 +1382,7 @@@ static int build_dma_sg(const struct sk
        return 1;
  }
  
- netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
-                                    struct net_device *net)
+ netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, struct net_device *net)
  {
        struct usbnet           *dev = netdev_priv(net);
        unsigned int                    length;
@@@ -1561,7 -1560,7 +1560,7 @@@ static inline void usb_free_skb(struct 
  
  // work (work deferred from completions, in_irq) or timer
  
- static void usbnet_bh (struct timer_list *t)
+ static void usbnet_bh(struct timer_list *t)
  {
        struct usbnet           *dev = timer_container_of(dev, t, delay);
        struct sk_buff          *skb;
@@@ -1636,7 -1635,7 +1635,7 @@@ static void usbnet_bh_work(struct work_
  
  // precondition: never called in_interrupt
  
- void usbnet_disconnect (struct usb_interface *intf)
+ void usbnet_disconnect(struct usb_interface *intf)
  {
        struct usbnet           *dev;
        struct usb_device       *xdev;
@@@ -1659,8 -1658,6 +1658,8 @@@
        net = dev->net;
        unregister_netdev (net);
  
 +      cancel_work_sync(&dev->kevent);
 +
        while ((urb = usb_get_from_anchor(&dev->deferred))) {
                dev_kfree_skb(urb->context);
                kfree(urb->sg);
@@@ -1702,7 -1699,7 +1701,7 @@@ static const struct device_type wwan_ty
  };
  
  int
- usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
  {
        struct usbnet                   *dev;
        struct net_device               *net;
@@@ -1909,7 -1906,7 +1908,7 @@@ EXPORT_SYMBOL_GPL(usbnet_probe)
   * resume only when the last interface is resumed
   */
  
- int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
+ int usbnet_suspend(struct usb_interface *intf, pm_message_t message)
  {
        struct usbnet           *dev = usb_get_intfdata(intf);
  
@@@ -1942,7 -1939,7 +1941,7 @@@
  }
  EXPORT_SYMBOL_GPL(usbnet_suspend);
  
- int usbnet_resume (struct usb_interface *intf)
+ int usbnet_resume(struct usb_interface *intf)
  {
        struct usbnet           *dev = usb_get_intfdata(intf);
        struct sk_buff          *skb;
diff --combined include/net/sock.h
index ff7d49af16193,c7e58b8e8a907..384a97248dc93
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@@ -118,6 -118,7 +118,7 @@@ typedef __u64 __bitwise __addrpair
   *    @skc_reuseport: %SO_REUSEPORT setting
   *    @skc_ipv6only: socket is IPV6 only
   *    @skc_net_refcnt: socket is using net ref counting
+  *    @skc_bypass_prot_mem: bypass the per-protocol memory accounting for skb
   *    @skc_bound_dev_if: bound device index if != 0
   *    @skc_bind_node: bind hash linkage for various protocol lookup tables
   *    @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
@@@ -174,6 -175,7 +175,7 @@@ struct sock_common 
        unsigned char           skc_reuseport:1;
        unsigned char           skc_ipv6only:1;
        unsigned char           skc_net_refcnt:1;
+       unsigned char           skc_bypass_prot_mem:1;
        int                     skc_bound_dev_if;
        union {
                struct hlist_node       skc_bind_node;
@@@ -313,6 -315,7 +315,7 @@@ struct sk_filter
    *   @sk_bind_phc: SO_TIMESTAMPING bind PHC index of PTP virtual clock
    *                 for timestamping
    *   @sk_tskey: counter to disambiguate concurrent tstamp requests
+   *   @sk_tx_queue_mapping_jiffies: time in jiffies of last 
@sk_tx_queue_mapping refresh.
    *   @sk_zckey: counter to order MSG_ZEROCOPY notifications
    *   @sk_socket: Identd and reporting IO signals
    *   @sk_user_data: RPC layer private data. Write-protected by @sk_callback_lock.
@@@ -380,6 -383,7 +383,7 @@@ struct sock 
  #define sk_reuseport          __sk_common.skc_reuseport
  #define sk_ipv6only           __sk_common.skc_ipv6only
  #define sk_net_refcnt         __sk_common.skc_net_refcnt
+ #define sk_bypass_prot_mem    __sk_common.skc_bypass_prot_mem
  #define sk_bound_dev_if               __sk_common.skc_bound_dev_if
  #define sk_bind_node          __sk_common.skc_bind_node
  #define sk_prot                       __sk_common.skc_prot
@@@ -485,6 -489,7 +489,7 @@@
        unsigned long           sk_pacing_rate; /* bytes per second */
        atomic_t                sk_zckey;
        atomic_t                sk_tskey;
+       unsigned long           sk_tx_queue_mapping_jiffies;
        __cacheline_group_end(sock_write_tx);
  
        __cacheline_group_begin(sock_read_tx);
@@@ -828,11 -833,9 +833,9 @@@ static inline bool sk_del_node_init(str
  {
        bool rc = __sk_del_node_init(sk);
  
-       if (rc) {
-               /* paranoid for a while -acme */
-               WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
+       if (rc)
                __sock_put(sk);
-       }
+ 
        return rc;
  }
  #define sk_del_node_init_rcu(sk)      sk_del_node_init(sk)
@@@ -850,14 -853,25 +853,25 @@@ static inline bool sk_nulls_del_node_in
  {
        bool rc = __sk_nulls_del_node_init_rcu(sk);
  
-       if (rc) {
-               /* paranoid for a while -acme */
-               WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
+       if (rc)
                __sock_put(sk);
-       }
+ 
        return rc;
  }
  
+ static inline bool sk_nulls_replace_node_init_rcu(struct sock *old,
+                                                 struct sock *new)
+ {
+       if (sk_hashed(old)) {
+               hlist_nulls_replace_init_rcu(&old->sk_nulls_node,
+                                            &new->sk_nulls_node);
+               __sock_put(old);
+               return true;
+       }
+ 
+       return false;
+ }
+ 
  static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
  {
        hlist_add_head(&sk->sk_node, list);
@@@ -1808,7 -1822,12 +1822,12 @@@ struct sock *sk_alloc(struct net *net, 
  void sk_free(struct sock *sk);
  void sk_net_refcnt_upgrade(struct sock *sk);
  void sk_destruct(struct sock *sk);
- struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
+ struct sock *sk_clone(const struct sock *sk, const gfp_t priority, bool lock);
+ 
+ static inline struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+ {
+       return sk_clone(sk, priority, true);
+ }
  
  struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
                             gfp_t priority);
@@@ -1992,7 -2011,15 +2011,15 @@@ static inline void sk_tx_queue_set(stru
        /* Paired with READ_ONCE() in sk_tx_queue_get() and
         * other WRITE_ONCE() because socket lock might be not held.
         */
-       WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
+       if (READ_ONCE(sk->sk_tx_queue_mapping) != tx_queue) {
+               WRITE_ONCE(sk->sk_tx_queue_mapping, tx_queue);
+               WRITE_ONCE(sk->sk_tx_queue_mapping_jiffies, jiffies);
+               return;
+       }
+ 
+       /* Refresh sk_tx_queue_mapping_jiffies if too old. */
+       if (time_is_before_jiffies(READ_ONCE(sk->sk_tx_queue_mapping_jiffies) + HZ))
+               WRITE_ONCE(sk->sk_tx_queue_mapping_jiffies, jiffies);
  }
  
  #define NO_QUEUE_MAPPING      USHRT_MAX
@@@ -2005,19 -2032,7 +2032,7 @@@ static inline void sk_tx_queue_clear(st
        WRITE_ONCE(sk->sk_tx_queue_mapping, NO_QUEUE_MAPPING);
  }
  
- static inline int sk_tx_queue_get(const struct sock *sk)
- {
-       if (sk) {
-               /* Paired with WRITE_ONCE() in sk_tx_queue_clear()
-                * and sk_tx_queue_set().
-                */
-               int val = READ_ONCE(sk->sk_tx_queue_mapping);
- 
-               if (val != NO_QUEUE_MAPPING)
-                       return val;
-       }
-       return -1;
- }
+ int sk_tx_queue_get(const struct sock *sk);
  
  static inline void __sk_rx_queue_set(struct sock *sk,
                                     const struct sk_buff *skb,
@@@ -2303,6 -2318,7 +2318,7 @@@ static inline int skb_copy_to_page_noca
        return 0;
  }
  
+ #define SK_WMEM_ALLOC_BIAS 1
  /**
   * sk_wmem_alloc_get - returns write allocations
   * @sk: socket
@@@ -2311,7 -2327,7 +2327,7 @@@
   */
  static inline int sk_wmem_alloc_get(const struct sock *sk)
  {
-       return refcount_read(&sk->sk_wmem_alloc) - 1;
+       return refcount_read(&sk->sk_wmem_alloc) - SK_WMEM_ALLOC_BIAS;
  }
  
  /**
@@@ -2596,12 -2612,16 +2612,16 @@@ static inline struct page_frag *sk_page
  
  bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
  
+ static inline bool __sock_writeable(const struct sock *sk, int wmem_alloc)
+ {
+       return wmem_alloc < (READ_ONCE(sk->sk_sndbuf) >> 1);
+ }
  /*
   *    Default write policy as shown to user space via poll/select/SIGIO
   */
  static inline bool sock_writeable(const struct sock *sk)
  {
-       return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
+       return __sock_writeable(sk, refcount_read(&sk->sk_wmem_alloc));
  }
  
  static inline gfp_t gfp_any(void)
@@@ -2635,12 -2655,8 +2655,12 @@@ static inline bool mem_cgroup_sk_under_
  #endif /* CONFIG_MEMCG_V1 */
  
        do {
 -              if (time_before64(get_jiffies_64(), mem_cgroup_get_socket_pressure(memcg)))
 +              if (time_before64(get_jiffies_64(),
 +                                mem_cgroup_get_socket_pressure(memcg))) {
 +                      memcg_memory_event(mem_cgroup_from_sk(sk),
 +                                         MEMCG_SOCK_THROTTLED);
                        return true;
 +              }
        } while ((memcg = parent_mem_cgroup(memcg)));
  
        return false;
diff --combined include/net/tcp.h
index ab20f549b8f91,190b3714e93b3..4fd6d8d1230d0
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@@ -303,6 -303,9 +303,9 @@@ static inline bool tcp_under_memory_pre
            mem_cgroup_sk_under_memory_pressure(sk))
                return true;
  
+       if (sk->sk_bypass_prot_mem)
+               return false;
+ 
        return READ_ONCE(tcp_memory_pressure);
  }
  /*
@@@ -370,7 -373,7 +373,7 @@@ void tcp_delack_timer_handler(struct so
  int tcp_ioctl(struct sock *sk, int cmd, int *karg);
  enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
  void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
 -void tcp_rcvbuf_grow(struct sock *sk);
 +void tcp_rcvbuf_grow(struct sock *sk, u32 newval);
  void tcp_rcv_space_adjust(struct sock *sk);
  int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
  void tcp_twsk_destructor(struct sock *sk);
@@@ -461,6 -464,8 +464,8 @@@ enum skb_drop_reason tcp_child_process(
  void tcp_enter_loss(struct sock *sk);
  void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
  void tcp_clear_retrans(struct tcp_sock *tp);
+ void tcp_update_pacing_rate(struct sock *sk);
+ void tcp_set_rto(struct sock *sk);
  void tcp_update_metrics(struct sock *sk);
  void tcp_init_metrics(struct sock *sk);
  void tcp_metrics_init(void);
@@@ -1896,13 -1901,6 +1901,6 @@@ struct tcp6_pseudohdr 
        __be32          protocol;       /* including padding */
  };
  
- union tcp_md5sum_block {
-       struct tcp4_pseudohdr ip4;
- #if IS_ENABLED(CONFIG_IPV6)
-       struct tcp6_pseudohdr ip6;
- #endif
- };
- 
  /*
   * struct tcp_sigpool - per-CPU pool of ahash_requests
   * @scratch: per-CPU temporary area, that can be used between
@@@ -1937,8 -1935,8 +1935,8 @@@ int tcp_sigpool_start(unsigned int id, 
  void tcp_sigpool_end(struct tcp_sigpool *c);
  size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
  /* - functions */
- int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
-                       const struct sock *sk, const struct sk_buff *skb);
+ void tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
+                        const struct sock *sk, const struct sk_buff *skb);
  int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
                   int family, u8 prefixlen, int l3index, u8 flags,
                   const u8 *newkey, u8 newkeylen);
@@@ -1997,13 -1995,10 +1995,10 @@@ static inline void tcp_md5_destruct_soc
  }
  #endif
  
- int tcp_md5_alloc_sigpool(void);
- void tcp_md5_release_sigpool(void);
- void tcp_md5_add_sigpool(void);
- extern int tcp_md5_sigpool_id;
- 
- int tcp_md5_hash_key(struct tcp_sigpool *hp,
-                    const struct tcp_md5sig_key *key);
+ struct md5_ctx;
+ void tcp_md5_hash_skb_data(struct md5_ctx *ctx, const struct sk_buff *skb,
+                          unsigned int header_len);
+ void tcp_md5_hash_key(struct md5_ctx *ctx, const struct tcp_md5sig_key *key);
  
  /* From tcp_fastopen.c */
  void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
@@@ -2353,7 -2348,7 +2348,7 @@@ struct tcp_sock_af_ops 
  #ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key   *(*md5_lookup) (const struct sock *sk,
                                                const struct sock *addr_sk);
-       int             (*calc_md5_hash)(char *location,
+       void            (*calc_md5_hash)(char *location,
                                         const struct tcp_md5sig_key *md5,
                                         const struct sock *sk,
                                         const struct sk_buff *skb);
@@@ -2381,7 -2376,7 +2376,7 @@@ struct tcp_request_sock_ops 
  #ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
                                                 const struct sock *addr_sk);
-       int             (*calc_md5_hash) (char *location,
+       void            (*calc_md5_hash) (char *location,
                                          const struct tcp_md5sig_key *md5,
                                          const struct sock *sk,
                                          const struct sk_buff *skb);
diff --combined include/net/tls.h
index c7bcdb3afad75,f2af113728aae..ebd2550280ae2
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@@ -53,6 -53,8 +53,8 @@@ struct tls_rec
  
  /* Maximum data size carried in a TLS record */
  #define TLS_MAX_PAYLOAD_SIZE          ((size_t)1 << 14)
+ /* Minimum record size limit as per RFC8449 */
+ #define TLS_MIN_RECORD_SIZE_LIM               ((size_t)1 << 6)
  
  #define TLS_HEADER_SIZE                       5
  #define TLS_NONCE_OFFSET              TLS_HEADER_SIZE
@@@ -226,6 -228,7 +228,7 @@@ struct tls_context 
        u8 rx_conf:3;
        u8 zerocopy_sendfile:1;
        u8 rx_no_pad:1;
+       u16 tx_max_payload_len;
  
        int (*push_pending_record)(struct sock *sk, int flags);
        void (*sk_write_space)(struct sock *sk);
@@@ -451,26 -454,25 +454,26 @@@ static inline void tls_offload_rx_resyn
  
  /* Log all TLS record header TCP sequences in [seq, seq+len] */
  static inline void
 -tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
 +tls_offload_rx_resync_async_request_start(struct tls_offload_resync_async *resync_async,
 +                                        __be32 seq, u16 len)
  {
 -      struct tls_context *tls_ctx = tls_get_ctx(sk);
 -      struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
 -
 -      atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
 +      atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) |
                     ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
 -      rx_ctx->resync_async->loglen = 0;
 -      rx_ctx->resync_async->rcd_delta = 0;
 +      resync_async->loglen = 0;
 +      resync_async->rcd_delta = 0;
  }
  
  static inline void
 -tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
 +tls_offload_rx_resync_async_request_end(struct tls_offload_resync_async *resync_async,
 +                                      __be32 seq)
  {
 -      struct tls_context *tls_ctx = tls_get_ctx(sk);
 -      struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
 +      atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
 +}
  
 -      atomic64_set(&rx_ctx->resync_async->req,
 -                   ((u64)ntohl(seq) << 32) | RESYNC_REQ);
 +static inline void
 +tls_offload_rx_resync_async_request_cancel(struct tls_offload_resync_async *resync_async)
 +{
 +      atomic64_set(&resync_async->req, 0);
  }
  
  static inline void
diff --combined net/core/filter.c
index fa06c5a08e22f,16105f52927da..d180f14d26500
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@@ -3877,8 -3877,7 +3877,8 @@@ static inline int __bpf_skb_change_head
        u32 new_len = skb->len + head_room;
        int ret;
  
 -      if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
 +      if (unlikely(flags || (int)head_room < 0 ||
 +                   (!skb_is_gso(skb) && new_len > max_len) ||
                     new_len < skb->len))
                return -EINVAL;
  
@@@ -5734,6 -5733,77 +5734,77 @@@ static const struct bpf_func_proto bpf_
        .arg5_type      = ARG_CONST_SIZE,
  };
  
+ static int sk_bpf_set_get_bypass_prot_mem(struct sock *sk,
+                                         char *optval, int optlen,
+                                         bool getopt)
+ {
+       int val;
+ 
+       if (optlen != sizeof(int))
+               return -EINVAL;
+ 
+       if (!sk_has_account(sk))
+               return -EOPNOTSUPP;
+ 
+       if (getopt) {
+               *(int *)optval = sk->sk_bypass_prot_mem;
+               return 0;
+       }
+ 
+       val = *(int *)optval;
+       if (val < 0 || val > 1)
+               return -EINVAL;
+ 
+       sk->sk_bypass_prot_mem = val;
+       return 0;
+ }
+ 
+ BPF_CALL_5(bpf_sock_create_setsockopt, struct sock *, sk, int, level,
+          int, optname, char *, optval, int, optlen)
+ {
+       if (level == SOL_SOCKET && optname == SK_BPF_BYPASS_PROT_MEM)
+       return sk_bpf_set_get_bypass_prot_mem(sk, optval, optlen, false);
+ 
+       return __bpf_setsockopt(sk, level, optname, optval, optlen);
+ }
+ 
+ static const struct bpf_func_proto bpf_sock_create_setsockopt_proto = {
+       .func           = bpf_sock_create_setsockopt,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+       .arg4_type      = ARG_PTR_TO_MEM | MEM_RDONLY,
+       .arg5_type      = ARG_CONST_SIZE,
+ };
+ 
+ BPF_CALL_5(bpf_sock_create_getsockopt, struct sock *, sk, int, level,
+          int, optname, char *, optval, int, optlen)
+ {
+       if (level == SOL_SOCKET && optname == SK_BPF_BYPASS_PROT_MEM) {
+               int err = sk_bpf_set_get_bypass_prot_mem(sk, optval, optlen, true);
+ 
+               if (err)
+                       memset(optval, 0, optlen);
+ 
+               return err;
+       }
+ 
+       return __bpf_getsockopt(sk, level, optname, optval, optlen);
+ }
+ 
+ static const struct bpf_func_proto bpf_sock_create_getsockopt_proto = {
+       .func           = bpf_sock_create_getsockopt,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+       .arg4_type      = ARG_PTR_TO_UNINIT_MEM,
+       .arg5_type      = ARG_CONST_SIZE,
+ };
+ 
  BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
           int, level, int, optname, char *, optval, int, optlen)
  {
@@@ -8063,6 -8133,20 +8134,20 @@@ sock_filter_func_proto(enum bpf_func_i
                return &bpf_sk_storage_get_cg_sock_proto;
        case BPF_FUNC_ktime_get_coarse_ns:
                return &bpf_ktime_get_coarse_ns_proto;
+       case BPF_FUNC_setsockopt:
+               switch (prog->expected_attach_type) {
+               case BPF_CGROUP_INET_SOCK_CREATE:
+                       return &bpf_sock_create_setsockopt_proto;
+               default:
+                       return NULL;
+               }
+       case BPF_FUNC_getsockopt:
+               switch (prog->expected_attach_type) {
+               case BPF_CGROUP_INET_SOCK_CREATE:
+                       return &bpf_sock_create_getsockopt_proto;
+               default:
+                       return NULL;
+               }
        default:
                return bpf_base_func_proto(func_id, prog);
        }
diff --combined net/ipv4/tcp_input.c
index e4a979b75cc66,ff19f6e54d55c..6db1d4c36a88b
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@@ -891,27 -891,18 +891,27 @@@ static inline void tcp_rcv_rtt_measure_
        }
  }
  
 -void tcp_rcvbuf_grow(struct sock *sk)
 +void tcp_rcvbuf_grow(struct sock *sk, u32 newval)
  {
        const struct net *net = sock_net(sk);
        struct tcp_sock *tp = tcp_sk(sk);
 -      int rcvwin, rcvbuf, cap;
 +      u32 rcvwin, rcvbuf, cap, oldval;
 +      u64 grow;
 +
 +      oldval = tp->rcvq_space.space;
 +      tp->rcvq_space.space = newval;
  
        if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
            (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
                return;
  
 +      /* DRS is always one RTT late. */
 +      rcvwin = newval << 1;
 +
        /* slow start: allow the sender to double its rate. */
 -      rcvwin = tp->rcvq_space.space << 1;
 +      grow = (u64)rcvwin * (newval - oldval);
 +      do_div(grow, oldval);
 +      rcvwin += grow << 1;
  
        if (!RB_EMPTY_ROOT(&tp->out_of_order_queue))
                rcvwin += TCP_SKB_CB(tp->ooo_last_skb)->end_seq - tp->rcv_nxt;
@@@ -937,9 -928,15 +937,15 @@@ void tcp_rcv_space_adjust(struct sock *
  
        trace_tcp_rcv_space_adjust(sk);
  
-       tcp_mstamp_refresh(tp);
+       if (unlikely(!tp->rcv_rtt_est.rtt_us))
+               return;
+ 
+       /* We do not refresh tp->tcp_mstamp here.
+        * Some platforms have expensive ktime_get() implementations.
+        * Using the last cached value is enough for DRS.
+        */
        time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
-       if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
+       if (time < (tp->rcv_rtt_est.rtt_us >> 3))
                return;
  
        /* Number of bytes copied to user in last RTT */
@@@ -952,7 -949,9 +958,7 @@@
  
        trace_tcp_rcvbuf_grow(sk, time);
  
 -      tp->rcvq_space.space = copied;
 -
 -      tcp_rcvbuf_grow(sk);
 +      tcp_rcvbuf_grow(sk, copied);
  
  new_measure:
        tp->rcvq_space.seq = tp->copied_seq;
@@@ -1102,7 -1101,7 +1108,7 @@@ static void tcp_rtt_estimator(struct so
        tp->srtt_us = max(1U, srtt);
  }
  
- static void tcp_update_pacing_rate(struct sock *sk)
+ void tcp_update_pacing_rate(struct sock *sk)
  {
        const struct tcp_sock *tp = tcp_sk(sk);
        u64 rate;
@@@ -1139,7 -1138,7 +1145,7 @@@
  /* Calculate rto without backoff.  This is the second half of Van Jacobson's
   * routine referred to above.
   */
- static void tcp_set_rto(struct sock *sk)
+ void tcp_set_rto(struct sock *sk)
  {
        const struct tcp_sock *tp = tcp_sk(sk);
        /* Old crap is replaced with new one. 8)
@@@ -5277,7 -5276,7 +5283,7 @@@ end
        }
        /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
        if (sk->sk_socket)
 -              tcp_rcvbuf_grow(sk);
 +              tcp_rcvbuf_grow(sk, tp->rcvq_space.space);
  }
  
  static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
diff --combined net/mptcp/protocol.c
index 2d6b8de35c449,94a5f6dcc5775..d568575cdcb59
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@@ -194,26 -194,17 +194,26 @@@ static bool mptcp_ooo_try_coalesce(stru
   * - mptcp does not maintain a msk-level window clamp
   * - returns true when  the receive buffer is actually updated
   */
 -static bool mptcp_rcvbuf_grow(struct sock *sk)
 +static bool mptcp_rcvbuf_grow(struct sock *sk, u32 newval)
  {
        struct mptcp_sock *msk = mptcp_sk(sk);
        const struct net *net = sock_net(sk);
 -      int rcvwin, rcvbuf, cap;
 +      u32 rcvwin, rcvbuf, cap, oldval;
 +      u64 grow;
  
 +      oldval = msk->rcvq_space.space;
 +      msk->rcvq_space.space = newval;
        if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
            (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
                return false;
  
 -      rcvwin = msk->rcvq_space.space << 1;
 +      /* DRS is always one RTT late. */
 +      rcvwin = newval << 1;
 +
 +      /* slow start: allow the sender to double its rate. */
 +      grow = (u64)rcvwin * (newval - oldval);
 +      do_div(grow, oldval);
 +      rcvwin += grow << 1;
  
        if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
        rcvwin += MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq - msk->ack_seq;
@@@ -343,7 -334,7 +343,7 @@@ end
        skb_set_owner_r(skb, sk);
        /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
        if (sk->sk_socket)
 -              mptcp_rcvbuf_grow(sk);
 +              mptcp_rcvbuf_grow(sk, msk->rcvq_space.space);
  }
  
  static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset,
@@@ -1007,7 -998,7 +1007,7 @@@ static void __mptcp_clean_una(struct so
                        if (WARN_ON_ONCE(!msk->recovery))
                                break;
  
 -                      WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
 +                      msk->first_pending = mptcp_send_next(sk);
                }
  
                dfrag_clear(sk, dfrag);
@@@ -1074,11 -1065,12 +1074,12 @@@ static void mptcp_enter_memory_pressure
        mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
  
-               if (first)
+               if (first && !ssk->sk_bypass_prot_mem) {
                        tcp_enter_memory_pressure(ssk);
-               sk_stream_moderate_sndbuf(ssk);
+                       first = false;
+               }
  
-               first = false;
+               sk_stream_moderate_sndbuf(ssk);
        }
        __mptcp_sync_sndbuf(sk);
  }
@@@ -1299,12 -1291,7 +1300,12 @@@ alloc_skb
        if (copy == 0) {
                u64 snd_una = READ_ONCE(msk->snd_una);
  
 -              if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) {
 +              /* No need for zero probe if there are any data pending
 +               * either at the msk or ssk level; skb is the current write
 +               * queue tail and can be empty at this point.
 +               */
 +              if (snd_una != msk->snd_nxt || skb->len ||
 +                  skb != tcp_send_head(ssk)) {
                        tcp_remove_empty_skb(ssk);
                        return 0;
                }
@@@ -1355,7 -1342,6 +1356,7 @@@
                 mpext->dsn64);
  
        if (zero_window_probe) {
 +              MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_WINPROBE);
                mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
                mpext->frozen = 1;
                if (READ_ONCE(msk->csum_enabled))
@@@ -1558,7 -1544,7 +1559,7 @@@ static int __subflow_push_pending(struc
  
                        mptcp_update_post_push(msk, dfrag, ret);
                }
 -              WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
 +              msk->first_pending = mptcp_send_next(sk);
  
                if (msk->snd_burst <= 0 ||
                    !sk_stream_memory_free(ssk) ||
@@@ -1918,7 -1904,7 +1919,7 @@@ static int mptcp_sendmsg(struct sock *s
                        get_page(dfrag->page);
                        list_add_tail(&dfrag->list, &msk->rtx_queue);
                        if (!msk->first_pending)
 -                              WRITE_ONCE(msk->first_pending, dfrag);
 +                              msk->first_pending = dfrag;
                }
                pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
                         dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
@@@ -1951,36 -1937,22 +1952,36 @@@ do_error
  
  static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
  
 -static int __mptcp_recvmsg_mskq(struct sock *sk,
 -                              struct msghdr *msg,
 -                              size_t len, int flags,
 +static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg,
 +                              size_t len, int flags, int copied_total,
                                struct scm_timestamping_internal *tss,
                                int *cmsg_flags)
  {
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct sk_buff *skb, *tmp;
 +      int total_data_len = 0;
        int copied = 0;
  
        skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
 -              u32 offset = MPTCP_SKB_CB(skb)->offset;
 +              u32 delta, offset = MPTCP_SKB_CB(skb)->offset;
                u32 data_len = skb->len - offset;
 -              u32 count = min_t(size_t, len - copied, data_len);
 +              u32 count;
                int err;
  
 +              if (flags & MSG_PEEK) {
 +                      /* skip already peeked skbs */
 +                      if (total_data_len + data_len <= copied_total) {
 +                              total_data_len += data_len;
 +                              continue;
 +                      }
 +
 +                      /* skip the already peeked data in the current skb */
 +                      delta = copied_total - total_data_len;
 +                      offset += delta;
 +                      data_len -= delta;
 +              }
 +
 +              count = min_t(size_t, len - copied, data_len);
                if (!(flags & MSG_TRUNC)) {
                        err = skb_copy_datagram_msg(skb, offset, msg, count);
                        if (unlikely(err < 0)) {
@@@ -1997,14 -1969,16 +1998,14 @@@
  
                copied += count;
  
 -              if (count < data_len) {
 -                      if (!(flags & MSG_PEEK)) {
 +              if (!(flags & MSG_PEEK)) {
 +                      msk->bytes_consumed += count;
 +                      if (count < data_len) {
                                MPTCP_SKB_CB(skb)->offset += count;
                                MPTCP_SKB_CB(skb)->map_seq += count;
 -                              msk->bytes_consumed += count;
 +                              break;
                        }
 -                      break;
 -              }
  
 -              if (!(flags & MSG_PEEK)) {
 +                      /* avoid the indirect call, we know the destructor is sock_rfree */
                        skb->destructor = NULL;
                        skb->sk = NULL;
@@@ -2012,6 -1986,7 +2013,6 @@@
                        sk_mem_uncharge(sk, skb->truesize);
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        skb_attempt_defer_free(skb);
 -                      msk->bytes_consumed += count;
                }
  
                if (copied >= len)
@@@ -2075,7 -2050,9 +2076,7 @@@ static void mptcp_rcv_space_adjust(stru
        if (msk->rcvq_space.copied <= msk->rcvq_space.space)
                goto new_measure;
  
 -      msk->rcvq_space.space = msk->rcvq_space.copied;
 -      if (mptcp_rcvbuf_grow(sk)) {
 -
 +      if (mptcp_rcvbuf_grow(sk, msk->rcvq_space.copied)) {
                /* Make subflows follow along.  If we do not do this, we
                 * get drops at subflow level if skbs can't be moved to
                 * the mptcp rx queue fast enough (announced rcv_win can
@@@ -2087,9 -2064,8 +2088,9 @@@
  
                        ssk = mptcp_subflow_tcp_sock(subflow);
                        slow = lock_sock_fast(ssk);
 -                      tcp_sk(ssk)->rcvq_space.space = msk->rcvq_space.copied;
 -                      tcp_rcvbuf_grow(ssk);
 +                      /* subflows can be added before tcp_init_transfer() */
 +                      if (tcp_sk(ssk)->rcvq_space.space)
 +                              tcp_rcvbuf_grow(ssk, msk->rcvq_space.copied);
                        unlock_sock_fast(ssk, slow);
                }
        }
@@@ -2208,8 -2184,7 +2209,8 @@@ static int mptcp_recvmsg(struct sock *s
        while (copied < len) {
                int err, bytes_read;
  
 -              bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, &tss, &cmsg_flags);
 +              bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags,
 +                                                copied, &tss, &cmsg_flags);
                if (unlikely(bytes_read < 0)) {
                        if (!copied)
                                copied = bytes_read;
@@@ -2900,7 -2875,7 +2901,7 @@@ static void __mptcp_clear_xmit(struct s
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct mptcp_data_frag *dtmp, *dfrag;
  
 -      WRITE_ONCE(msk->first_pending, NULL);
 +      msk->first_pending = NULL;
        list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
                dfrag_clear(sk, dfrag);
  }
@@@ -3440,6 -3415,9 +3441,6 @@@ void __mptcp_data_acked(struct sock *sk
  
  void __mptcp_check_push(struct sock *sk, struct sock *ssk)
  {
 -      if (!mptcp_send_head(sk))
 -              return;
 -
        if (!sock_owned_by_user(sk))
                __mptcp_subflow_push_pending(sk, ssk, false);
        else
diff --combined net/tls/tls_device.c
index 71734411ff4c3,4d29b390aed90..82ea407e520a0
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@@ -373,7 -373,8 +373,8 @@@ static int tls_do_allocation(struct soc
        if (!offload_ctx->open_record) {
                if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
                                                   sk->sk_allocation))) {
-                       READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
+                       if (!sk->sk_bypass_prot_mem)
+                               READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
                        sk_stream_moderate_sndbuf(sk);
                        return -ENOMEM;
                }
@@@ -461,7 -462,7 +462,7 @@@ static int tls_push_data(struct sock *s
        /* TLS_HEADER_SIZE is not counted as part of the TLS record, and
         * we need to leave room for an authentication tag.
         */
-       max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
+       max_open_record_len = tls_ctx->tx_max_payload_len +
                              prot->prepend_size;
        do {
                rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
@@@ -723,10 -724,8 +724,10 @@@ tls_device_rx_resync_async(struct tls_o
                /* shouldn't get to wraparound:
                 * too long in async stage, something bad happened
                 */
 -              if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
 +              if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) {
 +                      tls_offload_rx_resync_async_request_cancel(resync_async);
                        return false;
 +              }
  
                /* asynchronous stage: log all headers seq such that
                 * req_seq <= seq <= end_seq, and wait for real resync request
diff --combined tools/testing/selftests/net/netlink-dumps.c
index faa4455815f8c,679b6c77ace7c..51129c564d0a6
--- a/tools/testing/selftests/net/netlink-dumps.c
+++ b/tools/testing/selftests/net/netlink-dumps.c
@@@ -18,7 -18,7 +18,7 @@@
  #include <linux/mqueue.h>
  #include <linux/rtnetlink.h>
  
 -#include "../kselftest_harness.h"
 +#include "kselftest_harness.h"
  
  #include <ynl.h>
  
@@@ -143,6 -143,7 +143,7 @@@ TEST(dump_extack
        EXPECT_EQ(n, -1);
        EXPECT_EQ(errno, ENOBUFS);
  
+       ret = NO_CTRL;
        for (i = 0; i < cnt; i++) {
                struct ext_ack ea = {};
  
diff --combined tools/testing/selftests/net/tls.c
index 2b6590c0e13c1,da1b50b307194..a3ef4b57eb5f8
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@@ -21,7 -21,7 +21,7 @@@
  #include <sys/socket.h>
  #include <sys/stat.h>
  
 -#include "../kselftest_harness.h"
 +#include "kselftest_harness.h"
  
  #define TLS_PAYLOAD_MAX_LEN 16384
  #define SOL_TLS 282
@@@ -2856,6 -2856,147 +2856,147 @@@ TEST_F(tls_err, oob_pressure
                EXPECT_EQ(send(self->fd2, buf, 5, MSG_OOB), 5);
  }
  
+ /*
+  * Parse a stream of TLS records and ensure that each record respects
+  * the specified @max_payload_len.
+  */
+ static size_t parse_tls_records(struct __test_metadata *_metadata,
+                               const __u8 *rx_buf, int rx_len, int overhead,
+                               __u16 max_payload_len)
+ {
+       const __u8 *rec = rx_buf;
+       size_t total_plaintext_rx = 0;
+       const __u8 rec_header_len = 5;
+ 
+       while (rec < rx_buf + rx_len) {
+               __u16 record_payload_len;
+               __u16 plaintext_len;
+ 
+               /* Sanity check that it's a TLS header for application data */
+               ASSERT_EQ(rec[0], 23);
+               ASSERT_EQ(rec[1], 0x3);
+               ASSERT_EQ(rec[2], 0x3);
+ 
+               memcpy(&record_payload_len, rec + 3, 2);
+               record_payload_len = ntohs(record_payload_len);
+               ASSERT_GE(record_payload_len, overhead);
+ 
+               plaintext_len = record_payload_len - overhead;
+               total_plaintext_rx += plaintext_len;
+ 
+               /* Plaintext must not exceed the specified limit */
+               ASSERT_LE(plaintext_len, max_payload_len);
+               rec += rec_header_len + record_payload_len;
+       }
+ 
+       return total_plaintext_rx;
+ }
+ 
+ TEST(tls_12_tx_max_payload_len)
+ {
+       struct tls_crypto_info_keys tls12;
+       int cfd, ret, fd, overhead;
+       size_t total_plaintext_rx = 0;
+       __u8 tx[1024], rx[2000];
+       __u16 limit = 128;
+       __u16 opt = 0;
+       unsigned int optlen = sizeof(opt);
+       bool notls;
+ 
+       tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_CCM_128,
+                            &tls12, 0);
+ 
+       ulp_sock_pair(_metadata, &fd, &cfd, &notls);
+ 
+       if (notls)
+               exit(KSFT_SKIP);
+ 
+       /* Don't install keys on fd, we'll parse raw records */
+       ret = setsockopt(cfd, SOL_TLS, TLS_TX, &tls12, tls12.len);
+       ASSERT_EQ(ret, 0);
+ 
+       ret = setsockopt(cfd, SOL_TLS, TLS_TX_MAX_PAYLOAD_LEN, &limit,
+                        sizeof(limit));
+       ASSERT_EQ(ret, 0);
+ 
+       ret = getsockopt(cfd, SOL_TLS, TLS_TX_MAX_PAYLOAD_LEN, &opt, &optlen);
+       EXPECT_EQ(ret, 0);
+       EXPECT_EQ(limit, opt);
+       EXPECT_EQ(optlen, sizeof(limit));
+ 
+       memset(tx, 0, sizeof(tx));
+       ASSERT_EQ(send(cfd, tx, sizeof(tx), 0), sizeof(tx));
+       close(cfd);
+ 
+       ret = recv(fd, rx, sizeof(rx), 0);
+ 
+       /*
+        * 16B tag + 8B IV -- record header (5B) is not counted but we'll
+        * need it to walk the record stream
+        */
+       overhead = 16 + 8;
+       total_plaintext_rx = parse_tls_records(_metadata, rx, ret, overhead,
+                                              limit);
+ 
+       ASSERT_EQ(total_plaintext_rx, sizeof(tx));
+       close(fd);
+ }
+ 
+ TEST(tls_12_tx_max_payload_len_open_rec)
+ {
+       struct tls_crypto_info_keys tls12;
+       int cfd, ret, fd, overhead;
+       size_t total_plaintext_rx = 0;
+       __u8 tx[1024], rx[2000];
+       __u16 tx_partial = 256;
+       __u16 og_limit = 512, limit = 128;
+       bool notls;
+ 
+       tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_CCM_128,
+                            &tls12, 0);
+ 
+       ulp_sock_pair(_metadata, &fd, &cfd, &notls);
+ 
+       if (notls)
+               exit(KSFT_SKIP);
+ 
+       /* Don't install keys on fd, we'll parse raw records */
+       ret = setsockopt(cfd, SOL_TLS, TLS_TX, &tls12, tls12.len);
+       ASSERT_EQ(ret, 0);
+ 
+       ret = setsockopt(cfd, SOL_TLS, TLS_TX_MAX_PAYLOAD_LEN, &og_limit,
+                        sizeof(og_limit));
+       ASSERT_EQ(ret, 0);
+ 
+       memset(tx, 0, sizeof(tx));
+       ASSERT_EQ(send(cfd, tx, tx_partial, MSG_MORE), tx_partial);
+ 
+       /*
+        * Changing the payload limit with a pending open record should
+        * not be allowed.
+        */
+       ret = setsockopt(cfd, SOL_TLS, TLS_TX_MAX_PAYLOAD_LEN, &limit,
+                        sizeof(limit));
+       ASSERT_EQ(ret, -1);
+       ASSERT_EQ(errno, EBUSY);
+ 
+       ASSERT_EQ(send(cfd, tx + tx_partial, sizeof(tx) - tx_partial, MSG_EOR),
+                 sizeof(tx) - tx_partial);
+       close(cfd);
+ 
+       ret = recv(fd, rx, sizeof(rx), 0);
+ 
+       /*
+        * 16B tag + 8B IV -- record header (5B) is not counted but we'll
+        * need it to walk the record stream
+        */
+       overhead = 16 + 8;
+       total_plaintext_rx = parse_tls_records(_metadata, rx, ret, overhead,
+                                              og_limit);
+       ASSERT_EQ(total_plaintext_rx, sizeof(tx));
+       close(fd);
+ }
+ 
  TEST(non_established) {
        struct tls12_crypto_info_aes_gcm_256 tls12;
        struct sockaddr_in addr;

-- 
LinuxNextTracking

Reply via email to