The following commit has been merged in the master branch:
commit 1a2352ad82b515035efe563f997ef8f5ca4f8080
Merge: 0d0eb186421d0886ac466008235f6d9eedaf918e e5763491237ffee22d9b554febc2d00669f81dee
Author: Jakub Kicinski <[email protected]>
Date: Thu Oct 16 10:53:13 2025 -0700
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR (net-6.18-rc4).
No conflicts, adjacent changes:
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
ded9813d17d3 ("net: stmmac: Consider Tx VLAN offload tag length for maxSDU")
26ab9830beab ("net: stmmac: replace has_xxxx with core_type")
Signed-off-by: Jakub Kicinski <[email protected]>
diff --combined MAINTAINERS
index d652f4f27756e,0518f1f4f3b56..1ab7e87462993
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@ -1997,6 -1997,10 +1997,10 @@@ F: include/uapi/linux/if_arcnet.
ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
M: Arnd Bergmann <[email protected]>
+ M: Krzysztof Kozlowski <[email protected]>
+ M: Alexandre Belloni <[email protected]>
+ M: Linus Walleij <[email protected]>
+ R: Drew Fustini <[email protected]>
L: [email protected] (moderated for non-subscribers)
L: [email protected]
S: Maintained
@@@ -3296,7 -3300,6 +3300,7 @@@ F: drivers/*/*/*rockchip
F: drivers/*/*rockchip*
F: drivers/clk/rockchip/
F: drivers/i2c/busses/i2c-rk3x.c
+F: drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
F: sound/soc/rockchip/
N: rockchip
@@@ -5126,6 -5129,7 +5130,6 @@@ F: Documentation/devicetree/bindings/ne
F: drivers/net/ethernet/broadcom/genet/
F: drivers/net/ethernet/broadcom/unimac.h
F: drivers/net/mdio/mdio-bcm-unimac.c
-F: include/linux/platform_data/bcmgenet.h
F: include/linux/platform_data/mdio-bcm-unimac.h
BROADCOM IPROC ARM ARCHITECTURE
@@@ -13112,6 -13116,15 +13116,15 @@@ F: include/uapi/linux/io_uring.
F: include/uapi/linux/io_uring/
F: io_uring/
+ IO_URING ZCRX
+ M: Pavel Begunkov <[email protected]>
+ L: [email protected]
+ L: [email protected]
+ T: git https://github.com/isilence/linux.git zcrx/for-next
+ T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux.git
+ S: Maintained
+ F: io_uring/zcrx.*
+
IPMI SUBSYSTEM
M: Corey Minyard <[email protected]>
L: [email protected] (moderated for non-subscribers)
@@@ -13247,10 -13260,8 +13260,8 @@@ T: git git://git.kernel.org/pub/scm/lin
F: drivers/infiniband/ulp/isert
ISDN/CMTP OVER BLUETOOTH
- M: Karsten Keil <[email protected]>
- L: [email protected] (subscribers-only)
L: [email protected]
- S: Odd Fixes
+ S: Orphan
W: http://www.isdn4linux.de
F: Documentation/isdn/
F: drivers/isdn/capi/
@@@ -13259,10 -13270,8 +13270,8 @@@ F: include/uapi/linux/isdn
F: net/bluetooth/cmtp/
ISDN/mISDN SUBSYSTEM
- M: Karsten Keil <[email protected]>
- L: [email protected] (subscribers-only)
L: [email protected]
- S: Maintained
+ S: Orphan
W: http://www.isdn4linux.de
F: drivers/isdn/Kconfig
F: drivers/isdn/Makefile
@@@ -14395,6 -14404,7 +14404,7 @@@ F: tools/memory-model
LINUX-NEXT TREE
M: Stephen Rothwell <[email protected]>
+ M: Mark Brown <[email protected]>
L: [email protected]
S: Supported
B: mailto:[email protected] and the appropriate development tree
@@@ -17437,14 -17447,6 +17447,14 @@@ S: Maintaine
F: Documentation/devicetree/bindings/net/motorcomm,yt8xxx.yaml
F: drivers/net/phy/motorcomm.c
+MOTORCOMM YT921X ETHERNET SWITCH DRIVER
+M: David Yang <[email protected]>
+L: [email protected]
+S: Maintained
+F: Documentation/devicetree/bindings/net/dsa/motorcomm,yt921x.yaml
+F: drivers/net/dsa/yt921x.*
+F: net/dsa/tag_yt921x.c
+
MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
M: Jiri Slaby <[email protected]>
S: Maintained
@@@ -21326,6 -21328,7 +21336,7 @@@ F: drivers/media/platform/qcom/venus
QUALCOMM WCN36XX WIRELESS DRIVER
M: Loic Poulain <[email protected]>
L: [email protected]
+ L: [email protected]
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
F: drivers/net/wireless/ath/wcn36xx/
diff --combined drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 81d3bdc098e63,782bb48c9f3d7..cf8abbe018402
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@@ -9429,8 -9429,7 +9429,7 @@@ static int hclge_mii_ioctl(struct hclge
/* this command reads phy id and register at the same time */
fallthrough;
case SIOCGMIIREG:
- data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
- return 0;
+ return hclge_read_phy_reg(hdev, data->reg_num, &data->val_out);
case SIOCSMIIREG:
return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
@@@ -9445,8 -9444,15 +9444,8 @@@ static int hclge_do_ioctl(struct hnae3_
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return hclge_ptp_get_cfg(hdev, ifr);
- case SIOCSHWTSTAMP:
- return hclge_ptp_set_cfg(hdev, ifr);
- default:
- if (!hdev->hw.mac.phydev)
- return hclge_mii_ioctl(hdev, ifr, cmd);
- }
+ if (!hdev->hw.mac.phydev)
+ return hclge_mii_ioctl(hdev, ifr, cmd);
return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
@@@ -12894,8 -12900,6 +12893,8 @@@ static const struct hnae3_ae_ops hclge_
.get_dscp_prio = hclge_get_dscp_prio,
.get_wol = hclge_get_wol,
.set_wol = hclge_set_wol,
+ .hwtstamp_get = hclge_ptp_get_cfg,
+ .hwtstamp_set = hclge_ptp_set_cfg,
};
static struct hnae3_ae_algo ae_algo = {
diff --combined drivers/net/ethernet/intel/ice/ice_common.c
index 83f5217bce9f6,2532b6f82e971..046bc9c65c51f
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@@ -1161,9 -1161,6 +1161,9 @@@ int ice_init_hw(struct ice_hw *hw
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
+
+ ice_init_dev_hw(hw->back);
+
mutex_init(&hw->tnl_lock);
ice_init_chk_recipe_reuse_support(hw);
@@@ -3392,7 -3389,6 +3392,7 @@@ bool ice_is_100m_speed_supported(struc
case ICE_DEV_ID_E822L_SGMII:
case ICE_DEV_ID_E823L_1GBE:
case ICE_DEV_ID_E823C_SGMII:
+ case ICE_DEV_ID_E825C_SGMII:
return true;
default:
return false;
@@@ -4386,6 -4382,15 +4386,15 @@@ int ice_get_phy_lane_number(struct ice_
unsigned int lane;
int err;
+ /* E82X does not have sequential IDs, lane number is PF ID.
+ * For E825 device, the exception is the variant with external
+ * PHY (0x579F), in which there is also 1:1 pf_id -> lane_number
+ * mapping.
+ */
+ if (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->device_id == ICE_DEV_ID_E825C_SGMII)
+ return hw->pf_id;
+
options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
if (!options)
return -ENOMEM;
@@@ -6500,6 -6505,28 +6509,28 @@@ u32 ice_get_link_speed(u16 index
return ice_aq_to_link_speed[index];
}
+ /**
+ * ice_get_dest_cgu - get destination CGU dev for given HW
+ * @hw: pointer to the HW struct
+ *
+ * Get CGU client id for CGU register read/write operations.
+ *
+ * Return: CGU device id to use in SBQ transactions.
+ */
+ static enum ice_sbq_dev_id ice_get_dest_cgu(struct ice_hw *hw)
+ {
+ /* On dual complex E825 only complex 0 has functional CGU powering all
+ * the PHYs.
+ * SBQ destination device cgu points to CGU on a current complex and to
+ * access primary CGU from the secondary complex, the driver should use
+ * cgu_peer as a destination device.
+ */
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) &&
+ !ice_is_primary(hw))
+ return ice_sbq_dev_cgu_peer;
+ return ice_sbq_dev_cgu;
+ }
+
/**
* ice_read_cgu_reg - Read a CGU register
* @hw: Pointer to the HW struct
@@@ -6514,8 -6541,8 +6545,8 @@@
int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
.opcode = ice_sbq_msg_rd,
- .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr
};
int err;
@@@ -6546,8 -6573,8 +6577,8 @@@
int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
.opcode = ice_sbq_msg_wr,
- .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr,
.data = val
};
diff --combined drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 06b8786ae3abd,013c93b6605ed..c8cb492fddf46
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@@ -574,7 -574,9 +574,7 @@@ ice_destroy_tunnel_end
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
enum ice_tunnel_type tnl_type;
int status;
u16 index;
@@@ -596,7 -598,9 +596,7 @@@
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
enum ice_tunnel_type tnl_type;
int status;
@@@ -1475,7 -1479,7 +1475,7 @@@ static void ice_init_prof_masks(struct
per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
hw->blk[blk].masks.count = per_pf;
- hw->blk[blk].masks.first = hw->pf_id * per_pf;
+ hw->blk[blk].masks.first = hw->logical_pf_id * per_pf;
memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
diff --combined drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9735a75732cf5,44a142a041b2f..4092ea29c6308
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@@ -1978,7 -1978,6 +1978,6 @@@ static void esw_destroy_offloads_fdb_ta
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
- atomic64_set(&esw->user_count, 0);
}
static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
@@@ -3557,11 -3556,10 +3556,11 @@@ bool mlx5_esw_offloads_controller_valid
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
+ u8 mapping_id[MLX5_SW_IMAGE_GUID_MAX_BYTES];
struct mapping_ctx *reg_c0_obj_pool;
struct mlx5_vport *vport;
unsigned long i;
- u64 mapping_id;
+ u8 id_len;
int err;
mutex_init(&esw->offloads.termtbl_mutex);
@@@ -3583,10 -3581,9 +3582,10 @@@
if (err)
goto err_vport_metadata;
- mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
+ mlx5_query_nic_sw_system_image_guid(esw->dev, mapping_id, &id_len);
- reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
+ reg_c0_obj_pool = mapping_create_for_id(mapping_id, id_len,
+ MAPPING_TYPE_CHAIN,
sizeof(struct mlx5_mapped_obj),
ESW_REG_C0_USER_DATA_METADATA_MASK,
true);
diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index ba4eeba14baaa,7b90ecd3a55e6..1e69c1a7dc6c5
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@@ -40,14 -40,12 +40,14 @@@
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
+#include <net/devlink.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac_fpe.h"
#include "stmmac.h"
+#include "stmmac_pcs.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
@@@ -59,7 -57,8 +59,7 @@@
* with fine resolution and binary rollover. This avoid non-monotonic behavior
* (clock jumps) when changing timestamping settings at runtime.
*/
-#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
- PTP_TCR_TSCTRLSSR)
+#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCTRLSSR)
#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
@@@ -148,15 -147,6 +148,15 @@@ static void stmmac_exit_fs(struct net_d
#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
+struct stmmac_devlink_priv {
+ struct stmmac_priv *stmmac_priv;
+};
+
+enum stmmac_dl_param_id {
+ STMMAC_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ STMMAC_DEVLINK_PARAM_ID_TS_COARSE,
+};
+
/**
* stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
* @bsp_priv: BSP private data structure (unused)
@@@ -455,7 -445,7 +455,7 @@@ static void stmmac_get_rx_hwtstamp(stru
if (!priv->hwts_rx_en)
return;
/* For GMAC4, the valid timestamp is from CTX next desc. */
- if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+ if (dwmac_is_xmac(priv->plat->core_type))
desc = np;
/* Check if timestamp is available */
@@@ -473,33 -463,6 +473,33 @@@
}
}
+static void stmmac_update_subsecond_increment(struct stmmac_priv *priv)
+{
+ bool xmac = dwmac_is_xmac(priv->plat->core_type);
+ u32 sec_inc = 0;
+ u64 temp = 0;
+
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
+
+ /* program Sub Second Increment reg */
+ stmmac_config_sub_second_increment(priv, priv->ptpaddr,
+ priv->plat->clk_ptp_rate,
+ xmac, &sec_inc);
+ temp = div_u64(1000000000ULL, sec_inc);
+
+ /* Store sub second increment for later use */
+ priv->sub_second_inc = sec_inc;
+
+ /* calculate default added value:
+ * formula is :
+ * addend = (2^32)/freq_div_ratio;
+ * where, freq_div_ratio = 1e9ns/sec_inc
+ */
+ temp = (u64)(temp << 32);
+ priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
+ stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+}
+
/**
* stmmac_hwtstamp_set - control hardware timestamping.
* @dev: device pointer.
@@@ -684,8 -647,6 +684,8 @@@ static int stmmac_hwtstamp_set(struct n
priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;
priv->systime_flags = STMMAC_HWTS_ACTIVE;
+ if (!priv->tsfupdt_coarse)
+ priv->systime_flags |= PTP_TCR_TSCFUPDT;
if (priv->hwts_tx_en || priv->hwts_rx_en) {
priv->systime_flags |= tstamp_all | ptp_v2 |
@@@ -735,7 -696,10 +735,7 @@@ static int stmmac_hwtstamp_get(struct n
static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
u32 systime_flags)
{
- bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
struct timespec64 now;
- u32 sec_inc = 0;
- u64 temp = 0;
if (!priv->plat->clk_ptp_rate) {
netdev_err(priv->dev, "Invalid PTP clock rate");
@@@ -745,7 -709,23 +745,7 @@@
stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
priv->systime_flags = systime_flags;
- /* program Sub Second Increment reg */
- stmmac_config_sub_second_increment(priv, priv->ptpaddr,
- priv->plat->clk_ptp_rate,
- xmac, &sec_inc);
- temp = div_u64(1000000000ULL, sec_inc);
-
- /* Store sub second increment for later use */
- priv->sub_second_inc = sec_inc;
-
- /* calculate default added value:
- * formula is :
- * addend = (2^32)/freq_div_ratio;
- * where, freq_div_ratio = 1e9ns/sec_inc
- */
- temp = (u64)(temp << 32);
- priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
- stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+ stmmac_update_subsecond_increment(priv);
/* initialize system time */
ktime_get_real_ts64(&now);
@@@ -765,7 -745,7 +765,7 @@@
*/
static int stmmac_init_timestamping(struct stmmac_priv *priv)
{
- bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ bool xmac = dwmac_is_xmac(priv->plat->core_type);
int ret;
if (priv->plat->ptp_clk_freq_config)
@@@ -776,8 -756,7 +776,8 @@@
return -EOPNOTSUPP;
}
- ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
+ ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE |
+ PTP_TCR_TSCFUPDT);
if (ret) {
netdev_warn(priv->dev, "PTP init failed\n");
return ret;
@@@ -871,13 -850,6 +871,13 @@@ static struct phylink_pcs *stmmac_mac_s
return pcs;
}
+ /* The PCS control register is only relevant for SGMII, TBI and RTBI
+ * modes. We no longer support TBI or RTBI, so only configure this
+ * register when operating in SGMII mode with the integrated PCS.
+ */
+ if (priv->hw->pcs & STMMAC_PCS_SGMII && priv->integrated_pcs)
+ return &priv->integrated_pcs->pcs;
+
return NULL;
}
@@@ -887,18 -859,6 +887,18 @@@ static void stmmac_mac_config(struct ph
/* Nothing to do, xpcs_config() handles everything */
}
+static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ if (priv->plat->mac_finish)
+ priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode,
+ interface);
+
+ return 0;
+}
+
static void stmmac_mac_link_down(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
@@@ -1093,16 -1053,14 +1093,16 @@@ static int stmmac_mac_enable_tx_lpi(str
return 0;
}
-static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface)
+static int stmmac_mac_wol_set(struct phylink_config *config, u32 wolopts,
+ const u8 *sopass)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- if (priv->plat->mac_finish)
- priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode,
- interface);
+ device_set_wakeup_enable(priv->device, !!wolopts);
+
+ mutex_lock(&priv->lock);
+ priv->wolopts = wolopts;
+ mutex_unlock(&priv->lock);
return 0;
}
@@@ -1111,12 -1069,11 +1111,12 @@@ static const struct phylink_mac_ops stm
.mac_get_caps = stmmac_mac_get_caps,
.mac_select_pcs = stmmac_mac_select_pcs,
.mac_config = stmmac_mac_config,
+ .mac_finish = stmmac_mac_finish,
.mac_link_down = stmmac_mac_link_down,
.mac_link_up = stmmac_mac_link_up,
.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
- .mac_finish = stmmac_mac_finish,
+ .mac_wol_set = stmmac_mac_wol_set,
};
/**
@@@ -1129,25 -1086,17 +1129,25 @@@
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
int interface = priv->plat->phy_interface;
+ int speed = priv->plat->mac_port_sel_speed;
+
+ if (priv->dma_cap.pcs && interface == PHY_INTERFACE_MODE_SGMII) {
+ netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
+ priv->hw->pcs = STMMAC_PCS_SGMII;
- if (priv->dma_cap.pcs) {
- if ((interface == PHY_INTERFACE_MODE_RGMII) ||
- (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
- netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
- priv->hw->pcs = STMMAC_PCS_RGMII;
- } else if (interface == PHY_INTERFACE_MODE_SGMII) {
- netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
- priv->hw->pcs = STMMAC_PCS_SGMII;
+ switch (speed) {
+ case SPEED_10:
+ case SPEED_100:
+ case SPEED_1000:
+ priv->hw->reverse_sgmii_enable = true;
+ break;
+
+ default:
+ dev_warn(priv->device, "invalid port speed\n");
+ fallthrough;
+ case 0:
+ priv->hw->reverse_sgmii_enable = false;
+ break;
}
}
}
@@@ -1225,10 -1174,18 +1225,10 @@@ static int stmmac_init_phy(struct net_d
phylink_ethtool_set_eee(priv->phylink, &eee);
}
- if (!priv->plat->pmt) {
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
-
- phylink_ethtool_get_wol(priv->phylink, &wol);
- device_set_wakeup_capable(priv->device, !!wol.supported);
- device_set_wakeup_enable(priv->device, !!wol.wolopts);
- }
-
return 0;
}
-static int stmmac_phy_setup(struct stmmac_priv *priv)
+static int stmmac_phylink_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
struct phylink_config *config;
@@@ -1293,16 -1250,6 +1293,16 @@@
config->eee_enabled_default = true;
}
+ config->wol_phy_speed_ctrl = true;
+ if (priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL) {
+ config->wol_phy_legacy = true;
+ } else {
+ if (priv->dma_cap.pmt_remote_wake_up)
+ config->wol_mac_support |= WAKE_UCAST;
+ if (priv->dma_cap.pmt_magic_frame)
+ config->wol_mac_support |= WAKE_MAGIC;
+ }
+
fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@@ -2450,7 -2397,7 +2450,7 @@@ static void stmmac_dma_operation_mode(s
txfifosz = priv->dma_cap.tx_fifo_size;
/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
- if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
+ if (dwmac_is_xmac(priv->plat->core_type)) {
rxfifosz /= rx_channels_count;
txfifosz /= tx_channels_count;
}
@@@ -3496,6 -3443,19 +3496,6 @@@ static int stmmac_hw_setup(struct net_d
stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
phylink_rx_clk_stop_unblock(priv->phylink);
- /* PS and related bits will be programmed according to the speed */
- if (priv->hw->pcs) {
- int speed = priv->plat->mac_port_sel_speed;
-
- if ((speed == SPEED_10) || (speed == SPEED_100) ||
- (speed == SPEED_1000)) {
- priv->hw->ps = speed;
- } else {
- dev_warn(priv->device, "invalid port speed\n");
- priv->hw->ps = 0;
- }
- }
-
/* Initialize the MAC Core */
stmmac_core_init(priv, priv->hw, dev);
@@@ -3532,6 -3492,9 +3532,6 @@@
}
}
- if (priv->hw->pcs)
- stmmac_pcs_ctrl_ane(priv, 1, priv->hw->ps, 0);
-
/* set TX and RX rings length */
stmmac_set_rings_length(priv);
@@@ -4000,6 -3963,8 +4000,6 @@@ static int __stmmac_open(struct net_dev
stmmac_init_coalesce(priv);
phylink_start(priv->phylink);
- /* We may have called phylink_speed_down before */
- phylink_speed_up(priv->phylink);
ret = stmmac_request_irq(dev);
if (ret)
@@@ -4050,9 -4015,6 +4050,9 @@@ static int stmmac_open(struct net_devic
kfree(dma_conf);
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(priv->phylink);
+
return ret;
err_disconnect_phy:
@@@ -4070,6 -4032,13 +4070,6 @@@ static void __stmmac_release(struct net
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
- /* If the PHY or MAC has WoL enabled, then the PHY will not be
- * suspended when phylink_stop() is called below. Set the PHY
- * to its slowest speed to save power.
- */
- if (device_may_wakeup(priv->device))
- phylink_speed_down(priv->phylink, false);
-
/* Stop and disconnect the PHY */
phylink_stop(priv->phylink);
@@@ -4109,13 -4078,6 +4109,13 @@@ static int stmmac_release(struct net_de
{
struct stmmac_priv *priv = netdev_priv(dev);
+ /* If the PHY or MAC has WoL enabled, then the PHY will not be
+ * suspended when phylink_stop() is called below. Set the PHY
+ * to its slowest speed to save power.
+ */
+ if (device_may_wakeup(priv->device))
+ phylink_speed_down(priv->phylink, false);
+
__stmmac_release(dev);
phylink_disconnect_phy(priv->phylink);
@@@ -4127,18 -4089,11 +4127,11 @@@
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
struct stmmac_tx_queue *tx_q)
{
- u16 tag = 0x0, inner_tag = 0x0;
- u32 inner_type = 0x0;
struct dma_desc *p;
+ u16 tag = 0x0;
- if (!priv->dma_cap.vlins)
+ if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
return false;
- if (!skb_vlan_tag_present(skb))
- return false;
- if (skb->vlan_proto == htons(ETH_P_8021AD)) {
- inner_tag = skb_vlan_tag_get(skb);
- inner_type = STMMAC_VLAN_INSERT;
- }
tag = skb_vlan_tag_get(skb);
@@@ -4147,7 -4102,7 +4140,7 @@@
else
p = &tx_q->dma_tx[tx_q->cur_tx];
- if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
return false;
stmmac_set_tx_owner(priv, p);
@@@ -4545,6 -4500,7 +4538,7 @@@ static netdev_tx_t stmmac_xmit(struct s
bool has_vlan, set_ic;
int entry, first_tx;
dma_addr_t des;
+ u32 sdu_len;
tx_q = &priv->dma_conf.tx_queue[queue];
txq_stats = &priv->xstats.txq_stats[queue];
@@@ -4557,16 -4513,20 +4551,21 @@@
if (skb_is_gso(skb) && priv->tso) {
if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
return stmmac_tso_xmit(skb, dev);
- if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4 &&
+ (gso & SKB_GSO_UDP_L4))
return stmmac_tso_xmit(skb, dev);
}
if (priv->est && priv->est->enable &&
- priv->est->max_sdu[queue] &&
- skb->len > priv->est->max_sdu[queue]){
- priv->xstats.max_sdu_txq_drop[queue]++;
- goto max_sdu_err;
+ priv->est->max_sdu[queue]) {
+ sdu_len = skb->len;
+ /* Add VLAN tag length if VLAN tag insertion offload is requested */
+ if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
+ sdu_len += VLAN_HLEN;
+ if (sdu_len > priv->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ goto max_sdu_err;
+ }
}
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@@ -6011,7 -5971,7 +6010,7 @@@ static void stmmac_common_interrupt(str
u32 queue;
bool xmac;
- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ xmac = dwmac_is_xmac(priv->plat->core_type);
queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
if (priv->irq_wake)
@@@ -6025,7 -5985,7 +6024,7 @@@
stmmac_fpe_irq_status(priv);
/* To handle GMAC own interrupts */
- if ((priv->plat->has_gmac) || xmac) {
+ if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) {
int status = stmmac_host_irq_status(priv, priv->hw,
&priv->xstats);
if (unlikely(status)) {
@@@ -6039,6 -5999,15 +6038,6 @@@
for (queue = 0; queue < queues_count; queue++)
stmmac_host_mtl_irq_status(priv, priv->hw, queue);
- /* PCS link status */
- if (priv->hw->pcs &&
- !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
- if (priv->xstats.pcs_link)
- netif_carrier_on(priv->dev);
- else
- netif_carrier_off(priv->dev);
- }
-
stmmac_timestamp_interrupt(priv, priv);
}
}
@@@ -6386,7 -6355,7 +6385,7 @@@ static int stmmac_dma_cap_show(struct s
(priv->dma_cap.mbps_1000) ? "Y" : "N");
seq_printf(seq, "\tHalf duplex: %s\n",
(priv->dma_cap.half_duplex) ? "Y" : "N");
- if (priv->plat->has_xgmac) {
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
seq_printf(seq,
"\tNumber of Additional MAC address registers: %d\n",
priv->dma_cap.multi_addr);
@@@ -6410,7 -6379,7 +6409,7 @@@
(priv->dma_cap.time_stamp) ? "Y" : "N");
seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
(priv->dma_cap.atime_stamp) ? "Y" : "N");
- if (priv->plat->has_xgmac)
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC)
seq_printf(seq, "\tTimestamp System Time Source: %s\n",
dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
@@@ -6419,7 -6388,7 +6418,7 @@@
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
(priv->dma_cap.tx_coe) ? "Y" : "N");
if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
- priv->plat->has_xgmac) {
+ priv->plat->core_type == DWMAC_CORE_XGMAC) {
seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
(priv->dma_cap.rx_coe) ? "Y" : "N");
} else {
@@@ -7271,21 -7240,13 +7270,21 @@@ static int stmmac_hw_init(struct stmmac
* has to be disable and this can be done by passing the
* riwt_off field from the platform.
*/
- if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
- (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
+ if ((priv->synopsys_id >= DWMAC_CORE_3_50 ||
+ priv->plat->core_type == DWMAC_CORE_XGMAC) &&
+ !priv->plat->riwt_off) {
priv->use_riwt = 1;
dev_info(priv->device,
"Enable RX Mitigation via HW Watchdog Timer\n");
}
+ /* Unimplemented PCS init (as indicated by stmmac_do_callback()
+ * perversely returning -EINVAL) is non-fatal.
+ */
+ ret = stmmac_mac_pcs_init(priv);
+ if (ret != -EINVAL)
+ return ret;
+
return 0;
}
@@@ -7394,7 -7355,7 +7393,7 @@@ static int stmmac_xdp_rx_timestamp(cons
return -ENODATA;
/* For GMAC4, the valid timestamp is from CTX next desc. */
- if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+ if (dwmac_is_xmac(priv->plat->core_type))
desc_contains_ts = ndesc;
/* Check if timestamp is available */
@@@ -7412,95 -7373,6 +7411,95 @@@ static const struct xdp_metadata_ops st
.xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
};
+static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
+ struct stmmac_priv *priv = dl_priv->stmmac_priv;
+
+ priv->tsfupdt_coarse = ctx->val.vbool;
+
+ if (priv->tsfupdt_coarse)
+ priv->systime_flags &= ~PTP_TCR_TSCFUPDT;
+ else
+ priv->systime_flags |= PTP_TCR_TSCFUPDT;
+
+ /* In Coarse mode, we can use a smaller subsecond increment, let's
+ * reconfigure the systime, subsecond increment and addend.
+ */
+ stmmac_update_subsecond_increment(priv);
+
+ return 0;
+}
+
+static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
+ struct stmmac_priv *priv = dl_priv->stmmac_priv;
+
+ ctx->val.vbool = priv->tsfupdt_coarse;
+
+ return 0;
+}
+
+static const struct devlink_param stmmac_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(STMMAC_DEVLINK_PARAM_ID_TS_COARSE, "ts_coarse",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ stmmac_dl_ts_coarse_get,
+ stmmac_dl_ts_coarse_set, NULL),
+};
+
+/* None of the generic devlink parameters are implemented */
+static const struct devlink_ops stmmac_devlink_ops = {};
+
+static int stmmac_register_devlink(struct stmmac_priv *priv)
+{
+ struct stmmac_devlink_priv *dl_priv;
+ int ret;
+
+ /* For now, what is exposed over devlink is only relevant when
+ * timestamping is available and we have a valid ptp clock rate
+ */
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) ||
+ !priv->plat->clk_ptp_rate)
+ return 0;
+
+ priv->devlink = devlink_alloc(&stmmac_devlink_ops, sizeof(*dl_priv),
+ priv->device);
+ if (!priv->devlink)
+ return -ENOMEM;
+
+ dl_priv = devlink_priv(priv->devlink);
+ dl_priv->stmmac_priv = priv;
+
+ ret = devlink_params_register(priv->devlink, stmmac_devlink_params,
+ ARRAY_SIZE(stmmac_devlink_params));
+ if (ret)
+ goto dl_free;
+
+ devlink_register(priv->devlink);
+ return 0;
+
+dl_free:
+ devlink_free(priv->devlink);
+
+ return ret;
+}
+
+static void stmmac_unregister_devlink(struct stmmac_priv *priv)
+{
+ if (!priv->devlink)
+ return;
+
+ devlink_unregister(priv->devlink);
+ devlink_params_unregister(priv->devlink, stmmac_devlink_params,
+ ARRAY_SIZE(stmmac_devlink_params));
+ devlink_free(priv->devlink);
+}
+
/**
* stmmac_dvr_probe
* @device: device pointer
@@@ -7639,7 -7511,7 +7638,7 @@@ int stmmac_dvr_probe(struct device *dev
if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
- if (priv->plat->has_gmac4)
+ if (priv->plat->core_type == DWMAC_CORE_GMAC4)
ndev->hw_features |= NETIF_F_GSO_UDP_L4;
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
@@@ -7692,7 -7564,7 +7691,7 @@@
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
- if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
+ if (dwmac_is_xmac(priv->plat->core_type)) {
ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
priv->hw->hw_vlan_en = true;
}
@@@ -7700,11 -7572,8 +7699,8 @@@
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
}
- if (priv->dma_cap.vlins) {
+ if (priv->dma_cap.vlins)
ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
- if (priv->dma_cap.dvlan)
- ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
- }
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
@@@ -7723,7 -7592,7 +7719,7 @@@
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
- if (priv->plat->has_xgmac)
+ if (priv->plat->core_type == DWMAC_CORE_XGMAC)
ndev->max_mtu = XGMAC_JUMBO_LEN;
else if ((priv->plat->enh_desc) || (priv->synopsys_id >=
DWMAC_CORE_4_00))
ndev->max_mtu = JUMBO_LEN;
@@@ -7768,16 -7637,12 +7764,16 @@@
if (ret)
goto error_pcs_setup;
- ret = stmmac_phy_setup(priv);
+ ret = stmmac_phylink_setup(priv);
if (ret) {
netdev_err(ndev, "failed to setup phy (%d)\n", ret);
goto error_phy_setup;
}
+ ret = stmmac_register_devlink(priv);
+ if (ret)
+ goto error_devlink_setup;
+
ret = register_netdev(ndev);
if (ret) {
dev_err(priv->device, "%s: ERROR %i registering the device\n",
@@@ -7800,8 -7665,6 +7796,8 @@@
return ret;
error_netdev_register:
+ stmmac_unregister_devlink(priv);
+error_devlink_setup:
phylink_destroy(priv->phylink);
error_phy_setup:
stmmac_pcs_clean(ndev);
@@@ -7838,8 -7701,6 +7834,8 @@@ void stmmac_dvr_remove(struct device *d
#ifdef CONFIG_DEBUG_FS
stmmac_exit_fs(ndev);
#endif
+ stmmac_unregister_devlink(priv);
+
phylink_destroy(priv->phylink);
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
@@@ -7894,7 -7755,7 +7890,7 @@@ int stmmac_suspend(struct device *dev
priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
/* Enable Power down mode by programming the PMT regs */
- if (stmmac_wol_enabled_mac(priv)) {
+ if (priv->wolopts) {
stmmac_pmt(priv, priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
@@@ -7905,7 -7766,10 +7901,7 @@@
mutex_unlock(&priv->lock);
rtnl_lock();
- if (stmmac_wol_enabled_phy(priv))
- phylink_speed_down(priv->phylink, false);
-
- phylink_suspend(priv->phylink, stmmac_wol_enabled_mac(priv));
+ phylink_suspend(priv->phylink, !!priv->wolopts);
rtnl_unlock();
if (stmmac_fpe_supported(priv))
@@@ -7981,7 -7845,7 +7977,7 @@@ int stmmac_resume(struct device *dev
* this bit because it can generate problems while resuming
* from another devices (e.g. serial console).
*/
- if (stmmac_wol_enabled_mac(priv)) {
+ if (priv->wolopts) {
mutex_lock(&priv->lock);
stmmac_pmt(priv, priv->hw, 0);
mutex_unlock(&priv->lock);
@@@ -8043,6 -7907,9 +8039,6 @@@
* workqueue thread, which will race with initialisation.
*/
phylink_resume(priv->phylink);
- if (stmmac_wol_enabled_phy(priv))
- phylink_speed_up(priv->phylink);
-
rtnl_unlock();
netif_device_attach(ndev);
diff --combined drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index ef65cf511f3e2,3b4d4696afe96..d786527185999
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@@ -262,10 -262,10 +262,10 @@@ static int tc_init(struct stmmac_priv *
unsigned int count;
int ret, i;
- if (dma_cap->l3l4fnum) {
- priv->flow_entries_max = dma_cap->l3l4fnum;
+ priv->flow_entries_max = dma_cap->l3l4fnum;
+ if (priv->flow_entries_max) {
priv->flow_entries = devm_kcalloc(priv->device,
- dma_cap->l3l4fnum,
+ priv->flow_entries_max,
sizeof(*priv->flow_entries),
GFP_KERNEL);
if (!priv->flow_entries)
@@@ -981,7 -981,7 +981,7 @@@ static int tc_taprio_configure(struct s
if (qopt->cmd == TAPRIO_CMD_DESTROY)
goto disable;
- if (qopt->num_entries >= dep)
+ if (qopt->num_entries > dep)
return -EINVAL;
if (!qopt->cycle_time)
return -ERANGE;
@@@ -1012,7 -1012,7 +1012,7 @@@
s64 delta_ns = qopt->entries[i].interval;
u32 gates = qopt->entries[i].gate_mask;
- if (delta_ns > GENMASK(wid, 0))
+ if (delta_ns > GENMASK(wid - 1, 0))
return -ERANGE;
if (gates > GENMASK(31 - wid, 0))
return -ERANGE;
diff --combined drivers/net/usb/usbnet.c
index 62a85dbad31a5,697cd9d866d3d..f3087fb62f4f8
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@@ -189,7 -189,7 +189,7 @@@ static bool usbnet_needs_usb_name_forma
is_local_ether_addr(net->dev_addr));
}
-static void intr_complete (struct urb *urb)
+static void intr_complete(struct urb *urb)
{
struct usbnet *dev = urb->context;
int status = urb->status;
@@@ -221,7 -221,7 +221,7 @@@
"intr resubmit --> %d\n", status);
}
-static int init_status (struct usbnet *dev, struct usb_interface *intf)
+static int init_status(struct usbnet *dev, struct usb_interface *intf)
{
char *buf = NULL;
unsigned pipe = 0;
@@@ -326,7 -326,7 +326,7 @@@ static void __usbnet_status_stop_force(
* Some link protocols batch packets, so their rx_fixup paths
* can return clones as well as just modify the original skb.
*/
-void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
+void usbnet_skb_return(struct usbnet *dev, struct sk_buff *skb)
{
struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->net->tstats);
unsigned long flags;
@@@ -396,7 -396,7 +396,7 @@@ EXPORT_SYMBOL_GPL(usbnet_update_max_qle
*
*-------------------------------------------------------------------------*/
-int usbnet_change_mtu (struct net_device *net, int new_mtu)
+int usbnet_change_mtu(struct net_device *net, int new_mtu)
{
struct usbnet *dev = netdev_priv(net);
int ll_mtu = new_mtu + net->hard_header_len;
@@@ -472,7 -472,7 +472,7 @@@ static enum skb_state defer_bh(struct u
* NOTE: annoying asymmetry: if it's active, schedule_work() fails,
* but tasklet_schedule() doesn't. hope the failure is rare.
*/
-void usbnet_defer_kevent (struct usbnet *dev, int work)
+void usbnet_defer_kevent(struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
if (!usbnet_going_away(dev)) {
@@@ -489,9 -489,9 +489,9 @@@ EXPORT_SYMBOL_GPL(usbnet_defer_kevent)
/*-------------------------------------------------------------------------*/
-static void rx_complete (struct urb *urb);
+static void rx_complete(struct urb *urb);
-static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
+static int rx_submit(struct usbnet *dev, struct urb *urb, gfp_t flags)
{
struct sk_buff *skb;
struct skb_data *entry;
@@@ -597,7 -597,7 +597,7 @@@ static inline int rx_process(struct usb
/*-------------------------------------------------------------------------*/
-static void rx_complete (struct urb *urb)
+static void rx_complete(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct skb_data *entry = (struct skb_data *) skb->cb;
@@@ -728,7 -728,7 +728,7 @@@ EXPORT_SYMBOL_GPL(usbnet_purge_paused_r
// unlink pending rx/tx; completion handlers do all other cleanup
-static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
+static int unlink_urbs(struct usbnet *dev, struct sk_buff_head *q)
{
unsigned long flags;
struct sk_buff *skb;
@@@ -823,7 -823,7 +823,7 @@@ static void usbnet_terminate_urbs(struc
remove_wait_queue(&dev->wait, &wait);
}
-int usbnet_stop (struct net_device *net)
+int usbnet_stop(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
const struct driver_info *info = dev->driver_info;
@@@ -892,7 -892,7 +892,7 @@@ EXPORT_SYMBOL_GPL(usbnet_stop)
// precondition: never called in_interrupt
-int usbnet_open (struct net_device *net)
+int usbnet_open(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
int retval;
@@@ -1048,7 -1048,7 +1048,7 @@@ int usbnet_set_link_ksettings_mii(struc
}
EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings_mii);
-u32 usbnet_get_link (struct net_device *net)
+u32 usbnet_get_link(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
@@@ -1076,7 -1076,7 +1076,7 @@@ int usbnet_nway_reset(struct net_devic
}
EXPORT_SYMBOL_GPL(usbnet_nway_reset);
-void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
+void usbnet_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
{
struct usbnet *dev = netdev_priv(net);
@@@ -1087,7 -1087,7 +1087,7 @@@
}
EXPORT_SYMBOL_GPL(usbnet_get_drvinfo);
-u32 usbnet_get_msglevel (struct net_device *net)
+u32 usbnet_get_msglevel(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
@@@ -1095,7 -1095,7 +1095,7 @@@
}
EXPORT_SYMBOL_GPL(usbnet_get_msglevel);
-void usbnet_set_msglevel (struct net_device *net, u32 level)
+void usbnet_set_msglevel(struct net_device *net, u32 level)
{
struct usbnet *dev = netdev_priv(net);
@@@ -1166,7 -1166,7 +1166,7 @@@ static void __handle_set_rx_mode(struc
* especially now that control transfers can be queued.
*/
static void
-usbnet_deferred_kevent (struct work_struct *work)
+usbnet_deferred_kevent(struct work_struct *work)
{
struct usbnet *dev =
container_of(work, struct usbnet, kevent);
@@@ -1277,7 -1277,7 +1277,7 @@@ skip_reset
/*-------------------------------------------------------------------------*/
-static void tx_complete (struct urb *urb)
+static void tx_complete(struct urb *urb)
{
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct skb_data *entry = (struct skb_data *) skb->cb;
@@@ -1332,7 -1332,7 +1332,7 @@@
/*-------------------------------------------------------------------------*/
-void usbnet_tx_timeout (struct net_device *net, unsigned int txqueue)
+void usbnet_tx_timeout(struct net_device *net, unsigned int txqueue)
{
struct usbnet *dev = netdev_priv(net);
@@@ -1382,7 -1382,8 +1382,7 @@@ static int build_dma_sg(const struct sk
return 1;
}
-netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
- struct net_device *net)
+netdev_tx_t usbnet_start_xmit(struct sk_buff *skb, struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
unsigned int length;
@@@ -1560,7 -1561,7 +1560,7 @@@ static inline void usb_free_skb(struct
// work (work deferred from completions, in_irq) or timer
-static void usbnet_bh (struct timer_list *t)
+static void usbnet_bh(struct timer_list *t)
{
struct usbnet *dev = timer_container_of(dev, t, delay);
struct sk_buff *skb;
@@@ -1635,7 -1636,7 +1635,7 @@@ static void usbnet_bh_work(struct work_
// precondition: never called in_interrupt
-void usbnet_disconnect (struct usb_interface *intf)
+void usbnet_disconnect(struct usb_interface *intf)
{
struct usbnet *dev;
struct usb_device *xdev;
@@@ -1658,6 -1659,8 +1658,8 @@@
net = dev->net;
unregister_netdev (net);
+ cancel_work_sync(&dev->kevent);
+
while ((urb = usb_get_from_anchor(&dev->deferred))) {
dev_kfree_skb(urb->context);
kfree(urb->sg);
@@@ -1699,7 -1702,7 +1701,7 @@@ static const struct device_type wwan_ty
};
int
-usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+usbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
{
struct usbnet *dev;
struct net_device *net;
@@@ -1906,7 -1909,7 +1908,7 @@@ EXPORT_SYMBOL_GPL(usbnet_probe)
* resume only when the last interface is resumed
*/
-int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
+int usbnet_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
@@@ -1939,7 -1942,7 +1941,7 @@@
}
EXPORT_SYMBOL_GPL(usbnet_suspend);
-int usbnet_resume (struct usb_interface *intf)
+int usbnet_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
struct sk_buff *skb;
diff --combined include/net/tcp.h
index 190b3714e93b3,ab20f549b8f91..4fd6d8d1230d0
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@@ -303,9 -303,6 +303,9 @@@ static inline bool tcp_under_memory_pre
mem_cgroup_sk_under_memory_pressure(sk))
return true;
+ if (sk->sk_bypass_prot_mem)
+ return false;
+
return READ_ONCE(tcp_memory_pressure);
}
/*
@@@ -373,7 -370,7 +373,7 @@@ void tcp_delack_timer_handler(struct so
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
- void tcp_rcvbuf_grow(struct sock *sk);
+ void tcp_rcvbuf_grow(struct sock *sk, u32 newval);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
@@@ -464,8 -461,6 +464,8 @@@ enum skb_drop_reason tcp_child_process(
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
+void tcp_update_pacing_rate(struct sock *sk);
+void tcp_set_rto(struct sock *sk);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
@@@ -1901,6 -1896,13 +1901,6 @@@ struct tcp6_pseudohdr
__be32 protocol; /* including padding */
};
-union tcp_md5sum_block {
- struct tcp4_pseudohdr ip4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct tcp6_pseudohdr ip6;
-#endif
-};
-
/*
* struct tcp_sigpool - per-CPU pool of ahash_requests
* @scratch: per-CPU temporary area, that can be used between
@@@ -1935,8 -1937,8 +1935,8 @@@ int tcp_sigpool_start(unsigned int id,
void tcp_sigpool_end(struct tcp_sigpool *c);
size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
/* - functions */
-int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
- const struct sock *sk, const struct sk_buff *skb);
+void tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
+ const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
int family, u8 prefixlen, int l3index, u8 flags,
const u8 *newkey, u8 newkeylen);
@@@ -1995,10 -1997,13 +1995,10 @@@ static inline void tcp_md5_destruct_soc
}
#endif
-int tcp_md5_alloc_sigpool(void);
-void tcp_md5_release_sigpool(void);
-void tcp_md5_add_sigpool(void);
-extern int tcp_md5_sigpool_id;
-
-int tcp_md5_hash_key(struct tcp_sigpool *hp,
- const struct tcp_md5sig_key *key);
+struct md5_ctx;
+void tcp_md5_hash_skb_data(struct md5_ctx *ctx, const struct sk_buff *skb,
+ unsigned int header_len);
+void tcp_md5_hash_key(struct md5_ctx *ctx, const struct tcp_md5sig_key *key);
/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
@@@ -2348,7 -2353,7 +2348,7 @@@ struct tcp_sock_af_ops
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
const struct sock *addr_sk);
- int (*calc_md5_hash)(char *location,
+ void (*calc_md5_hash)(char *location,
const struct tcp_md5sig_key *md5,
const struct sock *sk,
const struct sk_buff *skb);
@@@ -2376,7 -2381,7 +2376,7 @@@ struct tcp_request_sock_ops
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
const struct sock *addr_sk);
- int (*calc_md5_hash) (char *location,
+ void (*calc_md5_hash) (char *location,
const struct tcp_md5sig_key *md5,
const struct sock *sk,
const struct sk_buff *skb);
diff --combined include/net/tls.h
index f2af113728aae,c7bcdb3afad75..ebd2550280ae2
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@@ -53,8 -53,6 +53,8 @@@ struct tls_rec
/* Maximum data size carried in a TLS record */
#define TLS_MAX_PAYLOAD_SIZE ((size_t)1 << 14)
+/* Minimum record size limit as per RFC8449 */
+#define TLS_MIN_RECORD_SIZE_LIM ((size_t)1 << 6)
#define TLS_HEADER_SIZE 5
#define TLS_NONCE_OFFSET TLS_HEADER_SIZE
@@@ -228,7 -226,6 +228,7 @@@ struct tls_context
u8 rx_conf:3;
u8 zerocopy_sendfile:1;
u8 rx_no_pad:1;
+ u16 tx_max_payload_len;
int (*push_pending_record)(struct sock *sk, int flags);
void (*sk_write_space)(struct sock *sk);
@@@ -454,25 -451,26 +454,26 @@@ static inline void tls_offload_rx_resyn
/* Log all TLS record header TCP sequences in [seq, seq+len] */
static inline void
- tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
+ tls_offload_rx_resync_async_request_start(struct tls_offload_resync_async *resync_async,
+ __be32 seq, u16 len)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
-
- atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
+ atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) |
((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
- rx_ctx->resync_async->loglen = 0;
- rx_ctx->resync_async->rcd_delta = 0;
+ resync_async->loglen = 0;
+ resync_async->rcd_delta = 0;
}
static inline void
- tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
+ tls_offload_rx_resync_async_request_end(struct tls_offload_resync_async *resync_async,
+ __be32 seq)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+ atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+ }
- atomic64_set(&rx_ctx->resync_async->req,
- ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+ static inline void
+ tls_offload_rx_resync_async_request_cancel(struct tls_offload_resync_async *resync_async)
+ {
+ atomic64_set(&resync_async->req, 0);
}
static inline void
diff --combined net/ipv4/tcp_input.c
index ff19f6e54d55c,e4a979b75cc66..6db1d4c36a88b
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@@ -891,18 -891,27 +891,27 @@@ static inline void tcp_rcv_rtt_measure_
}
}
- void tcp_rcvbuf_grow(struct sock *sk)
+ void tcp_rcvbuf_grow(struct sock *sk, u32 newval)
{
const struct net *net = sock_net(sk);
struct tcp_sock *tp = tcp_sk(sk);
- int rcvwin, rcvbuf, cap;
+ u32 rcvwin, rcvbuf, cap, oldval;
+ u64 grow;
+
+ oldval = tp->rcvq_space.space;
+ tp->rcvq_space.space = newval;
if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
return;
+ /* DRS is always one RTT late. */
+ rcvwin = newval << 1;
+
/* slow start: allow the sender to double its rate. */
- rcvwin = tp->rcvq_space.space << 1;
+ grow = (u64)rcvwin * (newval - oldval);
+ do_div(grow, oldval);
+ rcvwin += grow << 1;
if (!RB_EMPTY_ROOT(&tp->out_of_order_queue))
rcvwin += TCP_SKB_CB(tp->ooo_last_skb)->end_seq - tp->rcv_nxt;
@@@ -928,15 -937,9 +937,15 @@@ void tcp_rcv_space_adjust(struct sock *
trace_tcp_rcv_space_adjust(sk);
- tcp_mstamp_refresh(tp);
+ if (unlikely(!tp->rcv_rtt_est.rtt_us))
+ return;
+
+ /* We do not refresh tp->tcp_mstamp here.
+ * Some platforms have expensive ktime_get() implementations.
+ * Using the last cached value is enough for DRS.
+ */
time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
- if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
+ if (time < (tp->rcv_rtt_est.rtt_us >> 3))
return;
/* Number of bytes copied to user in last RTT */
@@@ -949,9 -952,7 +958,7 @@@
trace_tcp_rcvbuf_grow(sk, time);
- tp->rcvq_space.space = copied;
-
- tcp_rcvbuf_grow(sk);
+ tcp_rcvbuf_grow(sk, copied);
new_measure:
tp->rcvq_space.seq = tp->copied_seq;
@@@ -1101,7 -1102,7 +1108,7 @@@ static void tcp_rtt_estimator(struct so
tp->srtt_us = max(1U, srtt);
}
-static void tcp_update_pacing_rate(struct sock *sk)
+void tcp_update_pacing_rate(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
u64 rate;
@@@ -1138,7 -1139,7 +1145,7 @@@
/* Calculate rto without backoff. This is the second half of Van Jacobson's
* routine referred to above.
*/
-static void tcp_set_rto(struct sock *sk)
+void tcp_set_rto(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
/* Old crap is replaced with new one. 8)
@@@ -5276,7 -5277,7 +5283,7 @@@ end
}
/* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
if (sk->sk_socket)
- tcp_rcvbuf_grow(sk);
+ tcp_rcvbuf_grow(sk, tp->rcvq_space.space);
}
static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
diff --combined net/mptcp/protocol.c
index 94a5f6dcc5775,2d6b8de35c449..d568575cdcb59
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@@ -194,17 -194,26 +194,26 @@@ static bool mptcp_ooo_try_coalesce(stru
* - mptcp does not maintain a msk-level window clamp
* - returns true when the receive buffer is actually updated
*/
- static bool mptcp_rcvbuf_grow(struct sock *sk)
+ static bool mptcp_rcvbuf_grow(struct sock *sk, u32 newval)
{
struct mptcp_sock *msk = mptcp_sk(sk);
const struct net *net = sock_net(sk);
- int rcvwin, rcvbuf, cap;
+ u32 rcvwin, rcvbuf, cap, oldval;
+ u64 grow;
+ oldval = msk->rcvq_space.space;
+ msk->rcvq_space.space = newval;
if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
return false;
- rcvwin = msk->rcvq_space.space << 1;
+ /* DRS is always one RTT late. */
+ rcvwin = newval << 1;
+
+ /* slow start: allow the sender to double its rate. */
+ grow = (u64)rcvwin * (newval - oldval);
+ do_div(grow, oldval);
+ rcvwin += grow << 1;
if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
rcvwin += MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq -
msk->ack_seq;
@@@ -334,7 -343,7 +343,7 @@@ end
skb_set_owner_r(skb, sk);
/* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
if (sk->sk_socket)
- mptcp_rcvbuf_grow(sk);
+ mptcp_rcvbuf_grow(sk, msk->rcvq_space.space);
}
static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset,
@@@ -998,7 -1007,7 +1007,7 @@@ static void __mptcp_clean_una(struct so
if (WARN_ON_ONCE(!msk->recovery))
break;
- WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+ msk->first_pending = mptcp_send_next(sk);
}
dfrag_clear(sk, dfrag);
@@@ -1065,12 -1074,11 +1074,12 @@@ static void mptcp_enter_memory_pressure
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- if (first)
+ if (first && !ssk->sk_bypass_prot_mem) {
tcp_enter_memory_pressure(ssk);
- sk_stream_moderate_sndbuf(ssk);
+ first = false;
+ }
- first = false;
+ sk_stream_moderate_sndbuf(ssk);
}
__mptcp_sync_sndbuf(sk);
}
@@@ -1291,7 -1299,12 +1300,12 @@@ alloc_skb
if (copy == 0) {
u64 snd_una = READ_ONCE(msk->snd_una);
- if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) {
+ /* No need for zero probe if there are any data pending
+ * either at the msk or ssk level; skb is the current write
+ * queue tail and can be empty at this point.
+ */
+ if (snd_una != msk->snd_nxt || skb->len ||
+ skb != tcp_send_head(ssk)) {
tcp_remove_empty_skb(ssk);
return 0;
}
@@@ -1342,6 -1355,7 +1356,7 @@@
mpext->dsn64);
if (zero_window_probe) {
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_WINPROBE);
mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
mpext->frozen = 1;
if (READ_ONCE(msk->csum_enabled))
@@@ -1544,7 -1558,7 +1559,7 @@@ static int __subflow_push_pending(struc
mptcp_update_post_push(msk, dfrag, ret);
}
- WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+ msk->first_pending = mptcp_send_next(sk);
if (msk->snd_burst <= 0 ||
!sk_stream_memory_free(ssk) ||
@@@ -1904,7 -1918,7 +1919,7 @@@ static int mptcp_sendmsg(struct sock *s
get_page(dfrag->page);
list_add_tail(&dfrag->list, &msk->rtx_queue);
if (!msk->first_pending)
- WRITE_ONCE(msk->first_pending, dfrag);
+ msk->first_pending = dfrag;
}
pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n",
msk,
dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
@@@ -1937,22 -1951,36 +1952,36 @@@ do_error
static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
- static int __mptcp_recvmsg_mskq(struct sock *sk,
- struct msghdr *msg,
- size_t len, int flags,
+ static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg,
+ size_t len, int flags, int copied_total,
struct scm_timestamping_internal *tss,
int *cmsg_flags)
{
struct mptcp_sock *msk = mptcp_sk(sk);
struct sk_buff *skb, *tmp;
+ int total_data_len = 0;
int copied = 0;
skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
- u32 offset = MPTCP_SKB_CB(skb)->offset;
+ u32 delta, offset = MPTCP_SKB_CB(skb)->offset;
u32 data_len = skb->len - offset;
- u32 count = min_t(size_t, len - copied, data_len);
+ u32 count;
int err;
+ if (flags & MSG_PEEK) {
+ /* skip already peeked skbs */
+ if (total_data_len + data_len <= copied_total) {
+ total_data_len += data_len;
+ continue;
+ }
+
+ /* skip the already peeked data in the current skb */
+ delta = copied_total - total_data_len;
+ offset += delta;
+ data_len -= delta;
+ }
+
+ count = min_t(size_t, len - copied, data_len);
if (!(flags & MSG_TRUNC)) {
err = skb_copy_datagram_msg(skb, offset, msg, count);
if (unlikely(err < 0)) {
@@@ -1969,16 -1997,14 +1998,14 @@@
copied += count;
- if (count < data_len) {
- if (!(flags & MSG_PEEK)) {
+ if (!(flags & MSG_PEEK)) {
+ msk->bytes_consumed += count;
+ if (count < data_len) {
MPTCP_SKB_CB(skb)->offset += count;
MPTCP_SKB_CB(skb)->map_seq += count;
- msk->bytes_consumed += count;
+ break;
}
- break;
- }
- if (!(flags & MSG_PEEK)) {
/* avoid the indirect call, we know the destructor is sock_rfree */
skb->destructor = NULL;
skb->sk = NULL;
@@@ -1986,7 -2012,6 +2013,6 @@@
sk_mem_uncharge(sk, skb->truesize);
__skb_unlink(skb, &sk->sk_receive_queue);
skb_attempt_defer_free(skb);
- msk->bytes_consumed += count;
}
if (copied >= len)
@@@ -2050,9 -2075,7 +2076,7 @@@ static void mptcp_rcv_space_adjust(stru
if (msk->rcvq_space.copied <= msk->rcvq_space.space)
goto new_measure;
- msk->rcvq_space.space = msk->rcvq_space.copied;
- if (mptcp_rcvbuf_grow(sk)) {
-
+ if (mptcp_rcvbuf_grow(sk, msk->rcvq_space.copied)) {
/* Make subflows follow along. If we do not do this, we
* get drops at subflow level if skbs can't be moved to
* the mptcp rx queue fast enough (announced rcv_win can
@@@ -2064,8 -2087,9 +2088,9 @@@
ssk = mptcp_subflow_tcp_sock(subflow);
slow = lock_sock_fast(ssk);
- tcp_sk(ssk)->rcvq_space.space = msk->rcvq_space.copied;
- tcp_rcvbuf_grow(ssk);
+ /* subflows can be added before tcp_init_transfer() */
+ if (tcp_sk(ssk)->rcvq_space.space)
+ tcp_rcvbuf_grow(ssk, msk->rcvq_space.copied);
unlock_sock_fast(ssk, slow);
}
}
@@@ -2184,7 -2208,8 +2209,8 @@@ static int mptcp_recvmsg(struct sock *s
while (copied < len) {
int err, bytes_read;
- bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags,
- &tss, &cmsg_flags);
+ bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags,
+ copied, &tss, &cmsg_flags);
if (unlikely(bytes_read < 0)) {
if (!copied)
copied = bytes_read;
@@@ -2875,7 -2900,7 +2901,7 @@@ static void __mptcp_clear_xmit(struct s
struct mptcp_sock *msk = mptcp_sk(sk);
struct mptcp_data_frag *dtmp, *dfrag;
- WRITE_ONCE(msk->first_pending, NULL);
+ msk->first_pending = NULL;
list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
dfrag_clear(sk, dfrag);
}
@@@ -3415,9 -3440,6 +3441,6 @@@ void __mptcp_data_acked(struct sock *sk
void __mptcp_check_push(struct sock *sk, struct sock *ssk)
{
- if (!mptcp_send_head(sk))
- return;
-
if (!sock_owned_by_user(sk))
__mptcp_subflow_push_pending(sk, ssk, false);
else
diff --combined net/tls/tls_device.c
index 4d29b390aed90,71734411ff4c3..82ea407e520a0
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@@ -373,8 -373,7 +373,8 @@@ static int tls_do_allocation(struct soc
if (!offload_ctx->open_record) {
if (unlikely(!skb_page_frag_refill(prepend_size, pfrag,
sk->sk_allocation))) {
- READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
+ if (!sk->sk_bypass_prot_mem)
+ READ_ONCE(sk->sk_prot)->enter_memory_pressure(sk);
sk_stream_moderate_sndbuf(sk);
return -ENOMEM;
}
@@@ -462,7 -461,7 +462,7 @@@ static int tls_push_data(struct sock *s
/* TLS_HEADER_SIZE is not counted as part of the TLS record, and
* we need to leave room for an authentication tag.
*/
- max_open_record_len = TLS_MAX_PAYLOAD_SIZE +
+ max_open_record_len = tls_ctx->tx_max_payload_len +
prot->prepend_size;
do {
rc = tls_do_allocation(sk, ctx, pfrag, prot->prepend_size);
@@@ -724,8 -723,10 +724,10 @@@ tls_device_rx_resync_async(struct tls_o
/* shouldn't get to wraparound:
* too long in async stage, something bad happened
*/
- if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
+ if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) {
+ tls_offload_rx_resync_async_request_cancel(resync_async);
return false;
+ }
/* asynchronous stage: log all headers seq such that
* req_seq <= seq <= end_seq, and wait for real resync request
diff --combined net/wireless/nl80211.c
index ceca47cd9e251,03d07b54359a4..2187e148389de
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@@ -3544,9 -3544,6 +3544,9 @@@ static int _nl80211_parse_chandef(struc
return -EINVAL;
}
+ if (cfg80211_chandef_is_s1g(chandef))
+ chandef->width = NL80211_CHAN_WIDTH_1;
+
if (attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
enum nl80211_channel_type chantype;
@@@ -4139,8 -4136,7 +4139,7 @@@ static int nl80211_set_wiphy(struct sk_
rdev->wiphy.txq_quantum = old_txq_quantum;
}
- if (old_rts_threshold)
- kfree(old_radio_rts_threshold);
+ kfree(old_radio_rts_threshold);
return result;
}
--
LinuxNextTracking