Diffstat (limited to 'drivers/net/ethernet')
94 files changed, 1136 insertions, 601 deletions
diff --git a/drivers/net/ethernet/airoha/airoha_eth.c b/drivers/net/ethernet/airoha/airoha_eth.c
index 62bcbbbe2a95..56cf9a926a83 100644
--- a/drivers/net/ethernet/airoha/airoha_eth.c
+++ b/drivers/net/ethernet/airoha/airoha_eth.c
@@ -3083,7 +3083,6 @@ static void airoha_remove(struct platform_device *pdev)
 		if (!port)
 			continue;
 
-		airoha_dev_stop(port->dev);
 		unregister_netdev(port->dev);
 		airoha_metadata_dst_free(port);
 	}
diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
index 42dbe8f93231..5724f8f2defd 100644
--- a/drivers/net/ethernet/airoha/airoha_ppe.c
+++ b/drivers/net/ethernet/airoha/airoha_ppe.c
@@ -227,7 +227,9 @@ static int airoha_ppe_get_wdma_info(struct net_device *dev, const u8 *addr,
 	if (!dev)
 		return -ENODEV;
 
+	rcu_read_lock();
 	err = dev_fill_forward_path(dev, addr, &stack);
+	rcu_read_unlock();
 	if (err)
 		return err;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 711f295eb777..80c2c27ac9dc 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -431,7 +431,7 @@
 #define MAC_SSIR_SSINC_INDEX		16
 #define MAC_SSIR_SSINC_WIDTH		8
 #define MAC_TCR_SS_INDEX		29
-#define MAC_TCR_SS_WIDTH		2
+#define MAC_TCR_SS_WIDTH		3
 #define MAC_TCR_TE_INDEX		0
 #define MAC_TCR_TE_WIDTH		1
 #define MAC_TCR_VNE_INDEX		24
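[Editor's note: the MAC_TCR_SS_WIDTH change above matters because these _INDEX/_WIDTH pairs feed generic get/set-bits accessors; a too-narrow width silently truncates field values. A minimal userspace sketch of that accessor pattern — the macro shapes here are illustrative, not the driver's actual XGMAC I/O macros:

	#include <stdint.h>
	#include <stdio.h>

	#define MAC_TCR_SS_INDEX  29
	#define MAC_TCR_SS_WIDTH  3   /* was 2: values needing bit 2 were truncated */

	#define FIELD_MASK(w)            ((1u << (w)) - 1)
	#define GET_BITS(reg, idx, w)    (((reg) >> (idx)) & FIELD_MASK(w))
	#define SET_BITS(reg, idx, w, v) \
		(((reg) & ~(FIELD_MASK(w) << (idx))) | (((v) & FIELD_MASK(w)) << (idx)))

	int main(void)
	{
		/* A field value of 4 (0b100) needs all three bits */
		uint32_t reg = SET_BITS(0, MAC_TCR_SS_INDEX, MAC_TCR_SS_WIDTH, 4);
		printf("width 3 stores: %u\n", GET_BITS(reg, MAC_TCR_SS_INDEX, MAC_TCR_SS_WIDTH));

		/* With the old width of 2 the same write stores 4 & 0x3 == 0 */
		reg = SET_BITS(0, MAC_TCR_SS_INDEX, 2, 4);
		printf("width 2 stores: %u\n", GET_BITS(reg, MAC_TCR_SS_INDEX, 2));
		return 0;
	}
]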
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 62bb4b8a68e1..23beea48ae26 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1120,7 +1120,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	unsigned long flags;
 
 	DBGPR("-->xgbe_powerdown\n");
 
@@ -1131,8 +1130,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&pdata->lock, flags);
-
 	if (caller == XGMAC_DRIVER_CONTEXT)
 		netif_device_detach(netdev);
 
@@ -1148,8 +1145,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
 
 	pdata->power_down = 1;
 
-	spin_unlock_irqrestore(&pdata->lock, flags);
-
 	DBGPR("<--xgbe_powerdown\n");
 
 	return 0;
@@ -1159,7 +1154,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	unsigned long flags;
 
 	DBGPR("-->xgbe_powerup\n");
 
@@ -1170,8 +1164,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&pdata->lock, flags);
-
 	pdata->power_down = 0;
 
 	xgbe_napi_enable(pdata, 0);
@@ -1186,8 +1178,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 
 	xgbe_start_timers(pdata);
 
-	spin_unlock_irqrestore(&pdata->lock, flags);
-
 	DBGPR("<--xgbe_powerup\n");
 
 	return 0;
@@ -1281,20 +1271,25 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 	if (ret)
 		goto err_napi;
 
+	/* Reset the phy settings */
+	ret = xgbe_phy_reset(pdata);
+	if (ret)
+		goto err_irqs;
+
+	/* Start the phy */
 	ret = phy_if->phy_start(pdata);
 	if (ret)
 		goto err_irqs;
 
 	hw_if->enable_tx(pdata);
 	hw_if->enable_rx(pdata);
 
+	/* Synchronize flag with hardware state after enabling TX/RX.
+	 * This prevents stale state after device restart cycles.
+	 */
+	pdata->data_path_stopped = false;
 
 	udp_tunnel_nic_reset_ntf(netdev);
 
-	/* Reset the phy settings */
-	ret = xgbe_phy_reset(pdata);
-	if (ret)
-		goto err_txrx;
-
 	netif_tx_start_all_queues(netdev);
 
 	xgbe_start_timers(pdata);
@@ -1304,10 +1299,6 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
 	return 0;
 
-err_txrx:
-	hw_if->disable_rx(pdata);
-	hw_if->disable_tx(pdata);
-
 err_irqs:
 	xgbe_free_irqs(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index d1f0419edb23..7d45ea22a02e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -76,7 +76,6 @@ struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
 	pdata->netdev = netdev;
 	pdata->dev = dev;
 
-	spin_lock_init(&pdata->lock);
 	spin_lock_init(&pdata->xpcs_lock);
 	mutex_init(&pdata->rss_mutex);
 	spin_lock_init(&pdata->tstamp_lock);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index c63ddb12237e..b8cf6ccfe641 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -1942,7 +1942,7 @@ static void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata,
 static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_phy_data *phy_data = pdata->phy_data;
-	unsigned int reg;
+	int reg;
 
 	/* step 2: force PCS to send RX_ADAPT Req to PHY */
 	XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
@@ -1964,11 +1964,20 @@ static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata)
 
 	/* Step 4: Check for Block lock */
 
-	/* Link status is latched low, so read once to clear
-	 * and then read again to get current state
-	 */
-	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
 	reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+	if (reg < 0)
+		goto set_mode;
+
+	/* Link status is latched low so that momentary link drops
+	 * can be detected. If link was already down read again
+	 * to get the latest state.
+	 */
+	if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) {
+		reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+		if (reg < 0)
+			goto set_mode;
+	}
+
 	if (reg & MDIO_STAT1_LSTATUS) {
 		/* If the block lock is found, update the helpers
 		 * and declare the link up
@@ -2008,6 +2017,48 @@ rx_adapt_reinit:
 	xgbe_rx_adaptation(pdata);
 }
 
+/*
+ * xgbe_phy_stop_data_path - Stop TX/RX to prevent packet corruption
+ * @pdata: driver private data
+ *
+ * This function stops the data path (TX and RX) to prevent packet
+ * corruption during critical PHY operations like RX adaptation.
+ * Must be called before initiating RX adaptation when link goes down.
+ */
+static void xgbe_phy_stop_data_path(struct xgbe_prv_data *pdata)
+{
+	if (pdata->data_path_stopped)
+		return;
+
+	/* Stop TX/RX to prevent packet corruption during RX adaptation */
+	pdata->hw_if.disable_tx(pdata);
+	pdata->hw_if.disable_rx(pdata);
+	pdata->data_path_stopped = true;
+
+	netif_dbg(pdata, link, pdata->netdev,
+		  "stopping data path for RX adaptation\n");
+}
+
+/*
+ * xgbe_phy_start_data_path - Re-enable TX/RX after RX adaptation
+ * @pdata: driver private data
+ *
+ * This function re-enables the data path (TX and RX) after RX adaptation
+ * has completed successfully. Only called when link is confirmed up.
+ */
+static void xgbe_phy_start_data_path(struct xgbe_prv_data *pdata)
+{
+	if (!pdata->data_path_stopped)
+		return;
+
+	pdata->hw_if.enable_rx(pdata);
+	pdata->hw_if.enable_tx(pdata);
+	pdata->data_path_stopped = false;
+
+	netif_dbg(pdata, link, pdata->netdev,
+		  "restarting data path after RX adaptation\n");
+}
+
 static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
 {
 	int reg;
@@ -2801,13 +2852,27 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
 	if (pdata->en_rx_adap) {
 		/* if the link is available and adaptation is done,
 		 * declare link up
+		 *
+		 * Note: When link is up and adaptation is done, we can
+		 * safely re-enable the data path if it was stopped
+		 * for adaptation.
 		 */
-		if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+		if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done) {
+			xgbe_phy_start_data_path(pdata);
 			return 1;
+		}
 
 		/* If either link is not available or adaptation is not done,
 		 * retrigger the adaptation logic. (if the mode is not set,
 		 * then issue mailbox command first)
 		 */
+
+		/* CRITICAL: Stop data path BEFORE triggering RX adaptation
+		 * to prevent CRC errors from packets corrupted during
+		 * the adaptation process. This is especially important
+		 * when AN is OFF in 10G KR mode.
+		 */
+		xgbe_phy_stop_data_path(pdata);
+
 		if (pdata->mode_set) {
 			xgbe_phy_rx_adaptation(pdata);
 		} else {
@@ -2815,8 +2880,11 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
 			xgbe_phy_set_mode(pdata, phy_data->cur_mode);
 		}
 
-		if (pdata->rx_adapt_done)
+		if (pdata->rx_adapt_done) {
+			/* Adaptation complete, safe to re-enable data path */
+			xgbe_phy_start_data_path(pdata);
 			return 1;
+		}
 	} else if (reg & MDIO_STAT1_LSTATUS)
 		return 1;
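[Editor's note: the rewritten Step 4 above relies on MDIO latched-low semantics: STAT1.LSTATUS sticks at 0 after any link drop until it is read, so one read can reveal a momentary drop, and a second read is only needed when the link was already believed down. A small self-contained simulation of that behavior, assuming nothing about the real register beyond latch-low-on-drop:

	#include <stdbool.h>
	#include <stdio.h>

	#define LSTATUS 0x0004  /* bit 2 of MDIO STAT1 */

	static unsigned int stat1_latched; /* simulated latched-low register */
	static bool link_physically_up;

	/* A read returns the latched value, then re-arms the latch from
	 * the live link state (latched-low: a past drop reads as 0 once).
	 */
	static unsigned int read_stat1(void)
	{
		unsigned int val = stat1_latched;

		stat1_latched = link_physically_up ? LSTATUS : 0;
		return val;
	}

	int main(void)
	{
		/* Link dropped briefly since the last read: the first read
		 * returns the latched 0 even though the link is back up.
		 */
		link_physically_up = true;
		stat1_latched = 0;

		unsigned int reg = read_stat1();
		printf("first read:  %s (latched)\n", reg & LSTATUS ? "up" : "down");

		/* Re-reading now reports the live state -- the case the
		 * patch re-reads for when the link was already down.
		 */
		reg = read_stat1();
		printf("second read: %s (live)\n", reg & LSTATUS ? "up" : "down");
		return 0;
	}
]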
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 1269b8ce9249..438033a71523 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -1004,9 +1004,6 @@ struct xgbe_prv_data {
 	unsigned int pp3;
 	unsigned int pp4;
 
-	/* Overall device lock */
-	spinlock_t lock;
-
 	/* XPCS indirect addressing lock */
 	spinlock_t xpcs_lock;
 	unsigned int xpcs_window_def_reg;
@@ -1246,6 +1243,10 @@ struct xgbe_prv_data {
 	bool en_rx_adap;
 	int rx_adapt_retries;
 	bool rx_adapt_done;
+	/* Flag to track if data path (TX/RX) was stopped for RX adaptation.
+	 * This prevents packet corruption during the adaptation window.
+	 */
+	bool data_path_stopped;
 	bool mode_set;
 	bool sph;
 };
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 8283aeee35fb..dde4046cbf01 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -934,6 +934,17 @@ int arc_emac_probe(struct net_device *ndev, int interface)
 	/* Set poll rate so that it polls every 1 ms */
 	arc_reg_set(priv, R_POLLRATE, clock_frequency / 1000000);
 
+	/*
+	 * Put the device into a known quiescent state before requesting
+	 * the IRQ. Clear only EMAC interrupt status bits here; leave the
+	 * MDIO completion bit alone and avoid writing TXPL_MASK, which is
+	 * used to force TX polling rather than acknowledge interrupts.
+	 */
+	arc_reg_set(priv, R_ENABLE, 0);
+	arc_reg_set(priv, R_STATUS, RXINT_MASK | TXINT_MASK | ERR_MASK |
+				    TXCH_MASK | MSER_MASK | RXCR_MASK |
+				    RXFR_MASK | RXFL_MASK);
+
 	ndev->irq = irq;
 	dev_info(dev, "IRQ is %d\n", ndev->irq);
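[Editor's note: the arc_emac hunk above masks everything and acks stale events before request_irq(); on this MAC the status bits are write-1-to-clear, so the written mask selects exactly which bits are acknowledged. A minimal sketch of a W1C status register, with illustrative bit names:

	#include <stdint.h>
	#include <stdio.h>

	#define RXINT (1u << 0)
	#define TXINT (1u << 1)
	#define ERR   (1u << 2)
	#define MDIO  (1u << 3)  /* not ours to ack in this path */

	static uint32_t status = RXINT | ERR | MDIO;  /* stale pre-probe events */

	/* Write-1-to-clear: a 1 acks that bit, a 0 leaves it untouched */
	static void write_status(uint32_t val)
	{
		status &= ~val;
	}

	int main(void)
	{
		/* Ack only the bits this driver owns; MDIO stays latched */
		write_status(RXINT | TXINT | ERR);
		printf("status after quiesce: 0x%x\n", status);  /* 0x8 */
		return 0;
	}
]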
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index cd7dddeb91dd..9787c1857e13 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -25,7 +25,7 @@ config B44
 	select SSB
 	select MII
 	select PHYLIB
-	select FIXED_PHY if BCM47XX
+	select FIXED_PHY
 	help
 	  If you have a network (Ethernet) controller of this type, say Y
 	  or M here.
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index aa6d8606849f..972474893a6b 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -1152,12 +1152,6 @@ void bcmasp_enable_wol(struct bcmasp_intf *intf, bool en)
 	}
 }
 
-static void bcmasp_wol_irq_destroy(struct bcmasp_priv *priv)
-{
-	if (priv->wol_irq > 0)
-		free_irq(priv->wol_irq, priv);
-}
-
 static void bcmasp_eee_fixup(struct bcmasp_intf *intf, bool en)
 {
 	u32 reg, phy_lpi_overwrite;
@@ -1255,7 +1249,7 @@ static int bcmasp_probe(struct platform_device *pdev)
 	if (priv->irq <= 0)
 		return -EINVAL;
 
-	priv->clk = devm_clk_get_optional_enabled(dev, "sw_asp");
+	priv->clk = devm_clk_get_optional(dev, "sw_asp");
 	if (IS_ERR(priv->clk))
 		return dev_err_probe(dev, PTR_ERR(priv->clk),
 				     "failed to request clock\n");
@@ -1283,6 +1277,10 @@ static int bcmasp_probe(struct platform_device *pdev)
 
 	bcmasp_set_pdata(priv, pdata);
 
+	ret = clk_prepare_enable(priv->clk);
+	if (ret)
+		return dev_err_probe(dev, ret, "failed to start clock\n");
+
 	/* Enable all clocks to ensure successful probing */
 	bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0);
 
@@ -1294,8 +1292,10 @@ static int bcmasp_probe(struct platform_device *pdev)
 
 	ret = devm_request_irq(&pdev->dev, priv->irq, bcmasp_isr, 0,
 			       pdev->name, priv);
-	if (ret)
-		return dev_err_probe(dev, ret, "failed to request ASP interrupt: %d", ret);
+	if (ret) {
+		dev_err(dev, "Failed to request ASP interrupt: %d", ret);
+		goto err_clock_disable;
+	}
 
 	/* Register mdio child nodes */
 	of_platform_populate(dev->of_node, bcmasp_mdio_of_match, NULL, dev);
@@ -1307,13 +1307,17 @@ static int bcmasp_probe(struct platform_device *pdev)
 	priv->mda_filters = devm_kcalloc(dev, priv->num_mda_filters,
 					 sizeof(*priv->mda_filters), GFP_KERNEL);
-	if (!priv->mda_filters)
-		return -ENOMEM;
+	if (!priv->mda_filters) {
+		ret = -ENOMEM;
+		goto err_clock_disable;
+	}
 
 	priv->net_filters = devm_kcalloc(dev, priv->num_net_filters,
 					 sizeof(*priv->net_filters), GFP_KERNEL);
-	if (!priv->net_filters)
-		return -ENOMEM;
+	if (!priv->net_filters) {
+		ret = -ENOMEM;
+		goto err_clock_disable;
+	}
 
 	bcmasp_core_init_filters(priv);
 
@@ -1322,7 +1326,8 @@ static int bcmasp_probe(struct platform_device *pdev)
 	ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports");
 	if (!ports_node) {
 		dev_warn(dev, "No ports found\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_clock_disable;
 	}
 
 	i = 0;
@@ -1344,8 +1349,6 @@ static int bcmasp_probe(struct platform_device *pdev)
 	 */
 	bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE);
 
-	clk_disable_unprepare(priv->clk);
-
 	/* Now do the registration of the network ports which will take care
 	 * of managing the clock properly.
 	 */
@@ -1358,13 +1361,16 @@ static int bcmasp_probe(struct platform_device *pdev)
 		count++;
 	}
 
+	clk_disable_unprepare(priv->clk);
+
 	dev_info(dev, "Initialized %d port(s)\n", count);
 
 	return ret;
 
 err_cleanup:
-	bcmasp_wol_irq_destroy(priv);
 	bcmasp_remove_intfs(priv);
+err_clock_disable:
+	clk_disable_unprepare(priv->clk);
 
 	return ret;
 }
@@ -1376,7 +1382,6 @@ static void bcmasp_remove(struct platform_device *pdev)
 	if (!priv)
 		return;
 
-	bcmasp_wol_irq_destroy(priv);
 	bcmasp_remove_intfs(priv);
 }
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c426a41c3663..0751c0e4581a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2929,6 +2929,8 @@ static int bnxt_async_event_process(struct bnxt *bp,
 		u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
 		u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
 
+		if (type >= ARRAY_SIZE(bp->bs_trace))
+			goto async_event_process_exit;
 		bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
 		goto async_event_process_exit;
 	}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 9a41b9e0423c..a97d651130df 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -2146,7 +2146,7 @@ enum board_idx {
 };
 
 #define BNXT_TRACE_BUF_MAGIC_BYTE ((u8)0xbc)
-#define BNXT_TRACE_MAX	11
+#define BNXT_TRACE_MAX	(DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE + 1)
 
 struct bnxt_bs_trace_info {
 	u8 *magic_byte;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index ba47e8294fff..28d0ece2e7b1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -979,8 +979,8 @@ static int bnxt_set_channels(struct net_device *dev,
 
 	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
 	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
-	    netif_is_rxfh_configured(dev)) {
-		netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
+	    (netif_is_rxfh_configured(dev) || bp->num_rss_ctx)) {
+		netdev_warn(dev, "RSS table size change required, RSS table entries must be default (with no additional RSS contexts present) to proceed\n");
 		return -EINVAL;
 	}
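[Editor's note: the bnxt hunk above guards bp->bs_trace[type] because the index comes from a firmware event and must be validated before use. A minimal sketch of the bounds-check-before-index pattern:

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	struct trace_buf { int seq; };
	static struct trace_buf bs_trace[11];

	/* 'type' is firmware-controlled: validate before indexing */
	static struct trace_buf *get_trace(unsigned int type)
	{
		if (type >= ARRAY_SIZE(bs_trace))
			return NULL;
		return &bs_trace[type];
	}

	int main(void)
	{
		printf("type 3:  %p\n", (void *)get_trace(3));
		printf("type 42: %p\n", (void *)get_trace(42)); /* rejected */
		return 0;
	}
]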
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a71cd729fde6..482a31e7b72b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1342,8 +1342,7 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
 	}
 }
 
-void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
-			     bool tx_lpi_enabled)
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
@@ -1363,7 +1362,7 @@ void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
 
 	/* Enable EEE and switch to a 27Mhz clock automatically */
 	reg = bcmgenet_readl(priv->base + off);
-	if (tx_lpi_enabled)
+	if (enable)
 		reg |= TBUF_EEE_EN | TBUF_PM_EN;
 	else
 		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
@@ -1382,14 +1381,12 @@ void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
 		priv->clk_eee_enabled = false;
 	}
 
-	priv->eee.eee_enabled = enable;
-	priv->eee.tx_lpi_enabled = tx_lpi_enabled;
 }
 
 static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
-	struct ethtool_keee *p = &priv->eee;
+	int ret;
 
 	if (GENET_IS_V1(priv))
 		return -EOPNOTSUPP;
@@ -1397,17 +1394,21 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_keee *e)
 	if (!dev->phydev)
 		return -ENODEV;
 
-	e->tx_lpi_enabled = p->tx_lpi_enabled;
+	ret = phy_ethtool_get_eee(dev->phydev, e);
+	if (ret)
+		return ret;
+
+	/* tx_lpi_timer is maintained by the MAC hardware register; the
+	 * PHY-level eee_cfg timer is not set for GENET.
+	 */
 	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
 
-	return phy_ethtool_get_eee(dev->phydev, e);
+	return 0;
 }
 
 static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
-	struct ethtool_keee *p = &priv->eee;
-	bool active;
 
 	if (GENET_IS_V1(priv))
 		return -EOPNOTSUPP;
@@ -1415,15 +1416,7 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_keee *e)
 	if (!dev->phydev)
 		return -ENODEV;
 
-	p->eee_enabled = e->eee_enabled;
-
-	if (!p->eee_enabled) {
-		bcmgenet_eee_enable_set(dev, false, false);
-	} else {
-		active = phy_init_eee(dev->phydev, false) >= 0;
-		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
-		bcmgenet_eee_enable_set(dev, active, e->tx_lpi_enabled);
-	}
+	bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
 
 	return phy_ethtool_set_eee(dev->phydev, e);
 }
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 5ec3979779ec..9e4110c7fdf6 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -665,8 +665,6 @@ struct bcmgenet_priv {
 	u8 sopass[SOPASS_MAX];
 
 	struct bcmgenet_mib_counters mib;
-
-	struct ethtool_keee eee;
 };
 
 static inline bool bcmgenet_has_40bits(struct bcmgenet_priv *priv)
@@ -749,7 +747,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
 int bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
 			      enum bcmgenet_power_mode mode);
 
-void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
-			     bool tx_lpi_enabled);
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable);
 
 #endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 8fb551288298..96d5d4f7f51f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -123,7 +123,7 @@ static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv)
 
 	while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS) & RBUF_STATUS_WOL)) {
 		retries++;
-		if (retries > 5) {
+		if (retries > 50) {
 			netdev_crit(dev, "polling wol mode timeout\n");
 			return -ETIMEDOUT;
 		}
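[Editor's note: the bcmgenet_wol.c hunk above just widens a retry budget; the underlying pattern is bounded polling with a worst-case-sized limit. A minimal sketch, with the settle-after-20-polls behavior purely illustrative:

	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	static int polls;

	static bool hw_ready(void)
	{
		return ++polls >= 20;  /* pretend hardware settles on poll 20 */
	}

	/* Return 0 on success, -1 on timeout; the budget must cover the
	 * hardware's worst case, which is why 5 retries was too few.
	 */
	static int poll_wol_status(int max_retries)
	{
		int retries = 0;

		while (!hw_ready()) {
			if (++retries > max_retries)
				return -1;
			usleep(1000);
		}
		return 0;
	}

	int main(void)
	{
		polls = 0;
		printf("budget 5:  %d\n", poll_wol_status(5));   /* times out */
		polls = 0;
		printf("budget 50: %d\n", poll_wol_status(50));  /* succeeds */
		return 0;
	}
]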
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 38f854b94a79..a4e0d5a68268 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -29,7 +29,6 @@ static void bcmgenet_mac_config(struct net_device *dev)
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct phy_device *phydev = dev->phydev;
 	u32 reg, cmd_bits = 0;
-	bool active;
 
 	/* speed */
 	if (phydev->speed == SPEED_1000)
@@ -90,10 +89,6 @@ static void bcmgenet_mac_config(struct net_device *dev)
 	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
 	spin_unlock_bh(&priv->reg_lock);
 
-	active = phy_init_eee(phydev, 0) >= 0;
-	bcmgenet_eee_enable_set(dev,
-				priv->eee.eee_enabled && active,
-				priv->eee.tx_lpi_enabled);
 }
 
 /* setup netdev link state when PHY link status change and
@@ -113,6 +108,8 @@ void bcmgenet_mii_setup(struct net_device *dev)
 			bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
 	}
 
+	bcmgenet_eee_enable_set(dev, phydev->enable_tx_lpi);
+
 	phy_print_status(phydev);
 }
 
@@ -412,6 +409,9 @@ int bcmgenet_mii_probe(struct net_device *dev)
 	/* Indicate that the MAC is responsible for PHY PM */
 	dev->phydev->mac_managed_pm = true;
 
+	if (!GENET_IS_V1(priv))
+		phy_support_eee(dev->phydev);
+
 	return 0;
 }
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 2328fce33644..21a5dd342724 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -17029,6 +17029,13 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
 	return err;
 }
 
+static int tg3_is_default_mac_address(u8 *addr)
+{
+	static const u8 default_mac_address[ETH_ALEN] = { 0x00, 0x10, 0x18, 0x00, 0x00, 0x00 };
+
+	return ether_addr_equal(default_mac_address, addr);
+}
+
 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
 {
 	u32 hi, lo, mac_offset;
@@ -17102,6 +17109,10 @@ static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
 
 	if (!is_valid_ether_addr(addr))
 		return -EINVAL;
+
+	if (tg3_is_default_mac_address(addr))
+		return device_get_mac_address(&tp->pdev->dev, addr);
+
 	return 0;
 }
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 5bc35f651ebd..99e7d5cf3786 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -36,6 +36,7 @@
 #include <linux/tcp.h>
 #include <linux/types.h>
 #include <linux/udp.h>
+#include <linux/gcd.h>
 #include <net/pkt_sched.h>
 
 #include "macb.h"
@@ -668,6 +669,97 @@ static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
 	netif_tx_stop_all_queues(ndev);
 }
 
+/* Use juggling algorithm to left rotate tx ring and tx skb array */
+static void gem_shuffle_tx_one_ring(struct macb_queue *queue)
+{
+	unsigned int head, tail, count, ring_size, desc_size;
+	struct macb_tx_skb tx_skb, *skb_curr, *skb_next;
+	struct macb_dma_desc *desc_curr, *desc_next;
+	unsigned int i, cycles, shift, curr, next;
+	struct macb *bp = queue->bp;
+	unsigned char desc[24];
+	unsigned long flags;
+
+	desc_size = macb_dma_desc_get_size(bp);
+
+	if (WARN_ON_ONCE(desc_size > ARRAY_SIZE(desc)))
+		return;
+
+	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
+	head = queue->tx_head;
+	tail = queue->tx_tail;
+	ring_size = bp->tx_ring_size;
+	count = CIRC_CNT(head, tail, ring_size);
+
+	if (!(tail % ring_size))
+		goto unlock;
+
+	if (!count) {
+		queue->tx_head = 0;
+		queue->tx_tail = 0;
+		goto unlock;
+	}
+
+	shift = tail % ring_size;
+	cycles = gcd(ring_size, shift);
+
+	for (i = 0; i < cycles; i++) {
+		memcpy(&desc, macb_tx_desc(queue, i), desc_size);
+		memcpy(&tx_skb, macb_tx_skb(queue, i),
+		       sizeof(struct macb_tx_skb));
+
+		curr = i;
+		next = (curr + shift) % ring_size;
+
+		while (next != i) {
+			desc_curr = macb_tx_desc(queue, curr);
+			desc_next = macb_tx_desc(queue, next);
+
+			memcpy(desc_curr, desc_next, desc_size);
+
+			if (next == ring_size - 1)
+				desc_curr->ctrl &= ~MACB_BIT(TX_WRAP);
+			if (curr == ring_size - 1)
+				desc_curr->ctrl |= MACB_BIT(TX_WRAP);
+
+			skb_curr = macb_tx_skb(queue, curr);
+			skb_next = macb_tx_skb(queue, next);
+			memcpy(skb_curr, skb_next, sizeof(struct macb_tx_skb));
+
+			curr = next;
+			next = (curr + shift) % ring_size;
+		}
+
+		desc_curr = macb_tx_desc(queue, curr);
+		memcpy(desc_curr, &desc, desc_size);
+		if (i == ring_size - 1)
+			desc_curr->ctrl &= ~MACB_BIT(TX_WRAP);
+		if (curr == ring_size - 1)
+			desc_curr->ctrl |= MACB_BIT(TX_WRAP);
+		memcpy(macb_tx_skb(queue, curr), &tx_skb,
+		       sizeof(struct macb_tx_skb));
+	}
+
+	queue->tx_head = count;
+	queue->tx_tail = 0;
+
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+unlock:
+	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
+}
+
+/* Rotate the queue so that the tail is at index 0 */
+static void gem_shuffle_tx_rings(struct macb *bp)
+{
+	struct macb_queue *queue;
+	int q;
+
+	for (q = 0, queue = bp->queues; q < bp->num_queues; q++, queue++)
+		gem_shuffle_tx_one_ring(queue);
+}
+
 static void macb_mac_link_up(struct phylink_config *config,
 			     struct phy_device *phy,
 			     unsigned int mode, phy_interface_t interface,
@@ -706,8 +798,6 @@ static void macb_mac_link_up(struct phylink_config *config,
 		ctrl |= MACB_BIT(PAE);
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		queue->tx_head = 0;
-		queue->tx_tail = 0;
 		queue_writel(queue, IER, bp->rx_intr_mask | MACB_TX_INT_FLAGS |
 					 MACB_BIT(HRESP));
 	}
@@ -721,8 +811,10 @@ static void macb_mac_link_up(struct phylink_config *config,
 
 	spin_unlock_irqrestore(&bp->lock, flags);
 
-	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
 		macb_set_tx_clk(bp, speed);
+		gem_shuffle_tx_rings(bp);
+	}
 
 	/* Enable Rx and Tx; Enable PTP unicast */
 	ctrl = macb_readl(bp, NCR);
@@ -979,7 +1071,7 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budge
 	}
 
 	if (tx_skb->skb) {
-		napi_consume_skb(tx_skb->skb, budget);
+		dev_consume_skb_any(tx_skb->skb);
 		tx_skb->skb = NULL;
 	}
 }
@@ -2577,6 +2669,14 @@ static void macb_init_tieoff(struct macb *bp)
 	desc->ctrl = 0;
 }
 
+static void gem_init_rx_ring(struct macb_queue *queue)
+{
+	queue->rx_tail = 0;
+	queue->rx_prepared_head = 0;
+
+	gem_rx_refill(queue);
+}
+
 static void gem_init_rings(struct macb *bp)
 {
 	struct macb_queue *queue;
@@ -2594,10 +2694,7 @@ static void gem_init_rings(struct macb *bp)
 		queue->tx_head = 0;
 		queue->tx_tail = 0;
 
-		queue->rx_tail = 0;
-		queue->rx_prepared_head = 0;
-
-		gem_rx_refill(queue);
+		gem_init_rx_ring(queue);
 	}
 
 	macb_init_tieoff(bp);
@@ -3127,7 +3224,7 @@ static void gem_get_ethtool_stats(struct net_device *dev,
 	spin_lock_irq(&bp->stats_lock);
 	gem_update_stats(bp);
 	memcpy(data, &bp->ethtool_stats, sizeof(u64)
-	       * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
+	       * (GEM_STATS_LEN + QUEUE_STATS_LEN * bp->num_queues));
 	spin_unlock_irq(&bp->stats_lock);
 }
 
@@ -3886,6 +3983,9 @@ static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
 	struct macb *bp = netdev_priv(netdev);
 	int ret;
 
+	if (!(netdev->hw_features & NETIF_F_NTUPLE))
+		return -EOPNOTSUPP;
+
 	switch (cmd->cmd) {
 	case ETHTOOL_SRXCLSRLINS:
 		if ((cmd->fs.location >= bp->max_tuples)
@@ -5676,9 +5776,9 @@ static int __maybe_unused macb_suspend(struct device *dev)
 	struct macb_queue *queue;
 	struct in_device *idev;
 	unsigned long flags;
+	u32 tmp, ifa_local;
 	unsigned int q;
 	int err;
-	u32 tmp;
 
 	if (!device_may_wakeup(&bp->dev->dev))
 		phy_exit(bp->phy);
@@ -5687,14 +5787,21 @@ static int __maybe_unused macb_suspend(struct device *dev)
 		return 0;
 
 	if (bp->wol & MACB_WOL_ENABLED) {
-		/* Check for IP address in WOL ARP mode */
-		idev = __in_dev_get_rcu(bp->dev);
-		if (idev)
-			ifa = rcu_dereference(idev->ifa_list);
-		if ((bp->wolopts & WAKE_ARP) && !ifa) {
-			netdev_err(netdev, "IP address not assigned as required by WoL walk ARP\n");
-			return -EOPNOTSUPP;
+		if (bp->wolopts & WAKE_ARP) {
+			/* Check for IP address in WOL ARP mode */
+			rcu_read_lock();
+			idev = __in_dev_get_rcu(bp->dev);
+			if (idev)
+				ifa = rcu_dereference(idev->ifa_list);
+			if (!ifa) {
+				rcu_read_unlock();
+				netdev_err(netdev, "IP address not assigned as required by WoL walk ARP\n");
+				return -EOPNOTSUPP;
+			}
+			ifa_local = be32_to_cpu(ifa->ifa_local);
+			rcu_read_unlock();
 		}
+
 		spin_lock_irqsave(&bp->lock, flags);
 
 		/* Disable Tx and Rx engines before disabling the queues,
@@ -5733,8 +5840,9 @@ static int __maybe_unused macb_suspend(struct device *dev)
 			if (bp->wolopts & WAKE_ARP) {
 				tmp |= MACB_BIT(ARP);
 				/* write IP address into register */
-				tmp |= MACB_BFEXT(IP, be32_to_cpu(ifa->ifa_local));
+				tmp |= MACB_BFEXT(IP, ifa_local);
 			}
+			spin_unlock_irqrestore(&bp->lock, flags);
 
 			/* Change interrupt handler and
 			 * Enable WoL IRQ on queue 0
@@ -5747,11 +5855,12 @@ static int __maybe_unused macb_suspend(struct device *dev)
 				dev_err(dev, "Unable to request IRQ %d (error %d)\n",
 					bp->queues[0].irq, err);
-				spin_unlock_irqrestore(&bp->lock, flags);
 				return err;
 			}
+			spin_lock_irqsave(&bp->lock, flags);
 			queue_writel(bp->queues, IER, GEM_BIT(WOL));
 			gem_writel(bp, WOL, tmp);
+			spin_unlock_irqrestore(&bp->lock, flags);
 		} else {
 			err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
 					       IRQF_SHARED, netdev->name, bp->queues);
@@ -5759,13 +5868,13 @@ static int __maybe_unused macb_suspend(struct device *dev)
 				dev_err(dev, "Unable to request IRQ %d (error %d)\n",
 					bp->queues[0].irq, err);
-				spin_unlock_irqrestore(&bp->lock, flags);
 				return err;
 			}
+			spin_lock_irqsave(&bp->lock, flags);
 			queue_writel(bp->queues, IER, MACB_BIT(WOL));
 			macb_writel(bp, WOL, tmp);
+			spin_unlock_irqrestore(&bp->lock, flags);
 		}
-		spin_unlock_irqrestore(&bp->lock, flags);
 
 		enable_irq_wake(bp->queues[0].irq);
@@ -5832,6 +5941,8 @@ static int __maybe_unused macb_resume(struct device *dev)
 			queue_readl(bp->queues, ISR);
 		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
 			queue_writel(bp->queues, ISR, -1);
+		spin_unlock_irqrestore(&bp->lock, flags);
+
 		/* Replace interrupt handler on queue 0 */
 		devm_free_irq(dev, bp->queues[0].irq, bp->queues);
 		err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
@@ -5840,10 +5951,8 @@ static int __maybe_unused macb_resume(struct device *dev)
 			dev_err(dev, "Unable to request IRQ %d (error %d)\n",
 				bp->queues[0].irq, err);
-			spin_unlock_irqrestore(&bp->lock, flags);
 			return err;
 		}
-		spin_unlock_irqrestore(&bp->lock, flags);
 
 		disable_irq_wake(bp->queues[0].irq);
 
@@ -5855,8 +5964,18 @@ static int __maybe_unused macb_resume(struct device *dev)
 		rtnl_unlock();
 	}
 
+	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+		macb_init_buffers(bp);
+
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+		if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
+			if (macb_is_gem(bp))
+				gem_init_rx_ring(queue);
+			else
+				macb_init_rx_ring(queue);
+		}
+
 		napi_enable(&queue->napi_rx);
 		napi_enable(&queue->napi_tx);
 	}
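[Editor's note: gem_shuffle_tx_one_ring() above is an in-place left rotation using gcd(n, shift) independent cycles — O(n) moves, O(1) extra space — so the TX tail lands at index 0 and tx_head becomes the CIRC_CNT count. A runnable reduction of the same algorithm on a plain int array:

	#include <stdio.h>

	static unsigned int gcd(unsigned int a, unsigned int b)
	{
		while (b) {
			unsigned int t = a % b;
			a = b;
			b = t;
		}
		return a;
	}

	/* Left-rotate buf[0..n-1] by 'shift' in place: each of the
	 * gcd(n, shift) cycles saves one element, shifts the rest of
	 * the cycle down, then drops the saved element at the end.
	 */
	static void rotate_left(int *buf, unsigned int n, unsigned int shift)
	{
		unsigned int cycles = gcd(n, shift);

		for (unsigned int i = 0; i < cycles; i++) {
			int saved = buf[i];
			unsigned int curr = i, next = (curr + shift) % n;

			while (next != i) {
				buf[curr] = buf[next];
				curr = next;
				next = (curr + shift) % n;
			}
			buf[curr] = saved;
		}
	}

	int main(void)
	{
		int ring[6] = { 0, 1, 2, 3, 4, 5 };

		rotate_left(ring, 6, 2);  /* old tail index 2 is now index 0 */
		for (int i = 0; i < 6; i++)
			printf("%d ", ring[i]);  /* 2 3 4 5 0 1 */
		printf("\n");
		return 0;
	}

The driver additionally has to patch the TX_WRAP bit while moving descriptors, since the wrap marker belongs to the last ring slot, not to the descriptor that happened to live there.]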
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index c9e77819196e..d91f7b1aa39c 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -357,8 +357,10 @@ void gem_ptp_remove(struct net_device *ndev)
 {
 	struct macb *bp = netdev_priv(ndev);
 
-	if (bp->ptp_clock)
+	if (bp->ptp_clock) {
 		ptp_clock_unregister(bp->ptp_clock);
+		bp->ptp_clock = NULL;
+	}
 
 	gem_ptp_clear_timer(bp);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index fc0e0f36186e..52c1cb9cb7e0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -1533,7 +1533,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
 	if_id = (status & 0xFFFF0000) >> 16;
 	if (if_id >= ethsw->sw_attr.num_ifs) {
 		dev_err(dev, "Invalid if_id %d in IRQ status\n", if_id);
-		goto out;
+		goto out_clear;
 	}
 
 	port_priv = ethsw->ports[if_id];
@@ -1553,6 +1553,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
 		dpaa2_switch_port_connect_mac(port_priv);
 	}
 
+out_clear:
 	err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
 				    DPSW_IRQ_INDEX_IF, status);
 	if (err)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 70768392912c..a146ceaf2ed6 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -3467,7 +3467,7 @@ static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
 		priv->rx_ring[i] = bdr;
 
 		err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0,
-					 ENETC_RXB_DMA_SIZE_XDP);
+					 ENETC_RXB_TRUESIZE);
 		if (err)
 			goto free_vector;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index fed89d4f1e1d..2fe140ddebb2 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -813,6 +813,8 @@ static void enetc_get_ringparam(struct net_device *ndev,
 {
 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
 
+	ring->rx_max_pending = priv->rx_bd_count;
+	ring->tx_max_pending = priv->tx_bd_count;
 	ring->rx_pending = priv->rx_bd_count;
 	ring->tx_pending = priv->tx_bd_count;
diff --git a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
index 7fd39f895290..92a0f824dae7 100644
--- a/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
+++ b/drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c
@@ -333,11 +333,13 @@ static int netc_get_phy_addr(struct device_node *np)
 	mdio_node = of_get_child_by_name(np, "mdio");
 	if (!mdio_node)
-		return 0;
+		return -ENODEV;
 
 	phy_node = of_get_next_child(mdio_node, NULL);
-	if (!phy_node)
+	if (!phy_node) {
+		err = -ENODEV;
 		goto of_put_mdio_node;
+	}
 
 	err = of_property_read_u32(phy_node, "reg", &addr);
 	if (err)
@@ -423,6 +425,9 @@ static int imx95_enetc_mdio_phyaddr_config(struct platform_device *pdev)
 
 		addr = netc_get_phy_addr(gchild);
 		if (addr < 0) {
+			if (addr == -ENODEV)
+				continue;
+
 			dev_err(dev, "Failed to get PHY address\n");
 			return addr;
 		}
@@ -433,12 +438,6 @@ static int imx95_enetc_mdio_phyaddr_config(struct platform_device *pdev)
 			return -EINVAL;
 		}
 
-		/* The default value of LaBCR[MDIO_PHYAD_PRTAD ] is
-		 * 0, so no need to set the register.
-		 */
-		if (!addr)
-			continue;
-
 		switch (bus_devfn) {
 		case IMX95_ENETC0_BUS_DEVFN:
 			netc_reg_write(priv->ierb, IERB_LBCR(0),
@@ -578,16 +577,13 @@ static int imx94_enetc_mdio_phyaddr_config(struct netc_blk_ctrl *priv,
 
 	addr = netc_get_phy_addr(np);
 	if (addr < 0) {
+		if (addr == -ENODEV)
+			return 0;
+
 		dev_err(dev, "Failed to get PHY address\n");
 		return addr;
 	}
 
-	/* The default value of LaBCR[MDIO_PHYAD_PRTAD] is 0,
-	 * so no need to set the register.
-	 */
-	if (!addr)
-		return 0;
-
 	if (phy_mask & BIT(addr)) {
 		dev_err(dev,
 			"Find same PHY address in EMDIO and ENETC node\n");
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3d7648a119e5..9b09eb144b81 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2952,8 +2952,6 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
 dma_error:
 	dev_err(&pdev->dev, "TX DMA map failed\n");
 	buffer_info->dma = 0;
-	if (count)
-		count--;
 
 	while (count--) {
 		if (i == 0)
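[Editor's note: the e1000 hunk above (and the matching e1000e hunk below) drops an extra "if (count) count--" before the unwind loop. Since count already holds the number of successfully mapped buffers, "while (count--)" visits exactly that many entries; the extra decrement leaked one DMA mapping. A minimal sketch of the corrected unwind over a ring:

	#include <stdio.h>

	static void unmap(int idx)
	{
		printf("unmapping buffer %d\n", idx);
	}

	/* Unwind after mapping buffer 'i' failed: 'count' earlier buffers
	 * were mapped and every one of them must be released.
	 */
	static void unwind(int i, int count, int ring_size)
	{
		while (count--) {
			if (i == 0)
				i = ring_size;  /* wrap backwards around the ring */
			i--;
			unmap(i);
		}
	}

	int main(void)
	{
		/* 3 buffers mapped (indices 2, 1, 0); mapping index 3 failed.
		 * A pre-decrement of count would have skipped index 0.
		 */
		unwind(3, 3, 8);
		return 0;
	}
]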
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index ba331899d186..d4a1041e456d 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -33,6 +33,7 @@
 
 /* Extended Device Control */
 #define E1000_CTRL_EXT_LPCD		0x00000004 /* LCD Power Cycle Done */
+#define E1000_CTRL_EXT_DPG_EN		0x00000008 /* Dynamic Power Gating Enable */
 #define E1000_CTRL_EXT_SDP3_DATA	0x00000080 /* Value of SW Definable Pin 3 */
 #define E1000_CTRL_EXT_FORCE_SMBUS	0x00000800 /* Force SMBus mode */
 #define E1000_CTRL_EXT_EE_RST		0x00002000 /* Reinitialize from EEPROM */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index aa08f397988e..63ebe00376f5 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -117,7 +117,8 @@ enum e1000_boards {
 	board_pch_cnp,
 	board_pch_tgp,
 	board_pch_adp,
-	board_pch_mtp
+	board_pch_mtp,
+	board_pch_ptp
 };
 
 struct e1000_ps_page {
@@ -527,6 +528,7 @@ extern const struct e1000_info e1000_pch_cnp_info;
 extern const struct e1000_info e1000_pch_tgp_info;
 extern const struct e1000_info e1000_pch_adp_info;
 extern const struct e1000_info e1000_pch_mtp_info;
+extern const struct e1000_info e1000_pch_ptp_info;
 extern const struct e1000_info e1000_es2_info;
 
 void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index fc8ed38aa095..c7ac599e5a7a 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -118,8 +118,6 @@ struct e1000_hw;
 #define E1000_DEV_ID_PCH_ARL_I219_V24		0x57A1
 #define E1000_DEV_ID_PCH_PTP_I219_LM25		0x57B3
 #define E1000_DEV_ID_PCH_PTP_I219_V25		0x57B4
-#define E1000_DEV_ID_PCH_PTP_I219_LM26		0x57B5
-#define E1000_DEV_ID_PCH_PTP_I219_V26		0x57B6
 #define E1000_DEV_ID_PCH_PTP_I219_LM27		0x57B7
 #define E1000_DEV_ID_PCH_PTP_I219_V27		0x57B8
 #define E1000_DEV_ID_PCH_NVL_I219_LM29		0x57B9
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 0ff8688ac3b8..dea208db1be5 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -528,7 +528,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 
 	phy->id = e1000_phy_unknown;
 
-	if (hw->mac.type == e1000_pch_mtp) {
+	if (hw->mac.type == e1000_pch_mtp || hw->mac.type == e1000_pch_ptp) {
 		phy->retry_count = 2;
 		e1000e_enable_phy_retry(hw);
 	}
@@ -4932,6 +4932,15 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
 	reg |= E1000_KABGTXD_BGSQLBIAS;
 	ew32(KABGTXD, reg);
 
+	/* The hardware reset value of the DPG_EN bit is 1.
+	 * Clear DPG_EN to prevent unexpected autonomous power gating.
+	 */
+	if (hw->mac.type >= e1000_pch_ptp) {
+		reg = er32(CTRL_EXT);
+		reg &= ~E1000_CTRL_EXT_DPG_EN;
+		ew32(CTRL_EXT, reg);
+	}
+
 	return 0;
 }
 
@@ -6208,3 +6217,23 @@ const struct e1000_info e1000_pch_mtp_info = {
 	.phy_ops		= &ich8_phy_ops,
 	.nvm_ops		= &spt_nvm_ops,
 };
+
+const struct e1000_info e1000_pch_ptp_info = {
+	.mac			= e1000_pch_ptp,
+	.flags			= FLAG_IS_ICH
+				  | FLAG_HAS_WOL
+				  | FLAG_HAS_HW_TIMESTAMP
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_FLASH
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_APME_IN_WUC,
+	.flags2			= FLAG2_HAS_PHY_STATS
+				  | FLAG2_HAS_EEE,
+	.pba			= 26,
+	.max_hw_frame_size	= 9022,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &spt_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index fd4b117ed20f..9befdacd6730 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -55,6 +55,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
 	[board_pch_tgp]		= &e1000_pch_tgp_info,
 	[board_pch_adp]		= &e1000_pch_adp_info,
 	[board_pch_mtp]		= &e1000_pch_mtp_info,
+	[board_pch_ptp]		= &e1000_pch_ptp_info,
 };
 
 struct e1000_reg_info {
@@ -5651,8 +5652,6 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
 dma_error:
 	dev_err(&pdev->dev, "Tx DMA map failed\n");
 	buffer_info->dma = 0;
-	if (count)
-		count--;
 
 	while (count--) {
 		if (i == 0)
@@ -7922,14 +7921,12 @@ static const struct pci_device_id e1000_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_mtp },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ARL_I219_LM24), board_pch_mtp },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ARL_I219_V24), board_pch_mtp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM25), board_pch_mtp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V25), board_pch_mtp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM26), board_pch_mtp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V26), board_pch_mtp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM27), board_pch_mtp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V27), board_pch_mtp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_NVL_I219_LM29), board_pch_mtp },
-	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_NVL_I219_V29), board_pch_mtp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM25), board_pch_ptp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V25), board_pch_ptp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_LM27), board_pch_ptp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_PTP_I219_V27), board_pch_ptp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_NVL_I219_LM29), board_pch_ptp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_NVL_I219_V29), board_pch_ptp },
 
 	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
 };
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 7b9e147d7365..926d001b2150 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3569,6 +3569,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	u16 pf_q = vsi->base_queue + ring->queue_index;
 	struct i40e_hw *hw = &vsi->back->hw;
 	struct i40e_hmc_obj_rxq rx_ctx;
+	u32 xdp_frame_sz;
 	int err = 0;
 	bool ok;
 
@@ -3578,49 +3579,47 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	memset(&rx_ctx, 0, sizeof(rx_ctx));
 
 	ring->rx_buf_len = vsi->rx_buf_len;
+	xdp_frame_sz = i40e_rx_pg_size(ring) / 2;
 
 	/* XDP RX-queue info only needed for RX rings exposed to XDP */
 	if (ring->vsi->type != I40E_VSI_MAIN)
 		goto skip;
 
-	if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
-		err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
-					 ring->queue_index,
-					 ring->q_vector->napi.napi_id,
-					 ring->rx_buf_len);
-		if (err)
-			return err;
-	}
-
 	ring->xsk_pool = i40e_xsk_pool(ring);
 	if (ring->xsk_pool) {
-		xdp_rxq_info_unreg(&ring->xdp_rxq);
+		xdp_frame_sz = xsk_pool_get_rx_frag_step(ring->xsk_pool);
 		ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
 		err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
 					 ring->queue_index,
 					 ring->q_vector->napi.napi_id,
-					 ring->rx_buf_len);
+					 xdp_frame_sz);
 		if (err)
 			return err;
 		err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 						 MEM_TYPE_XSK_BUFF_POOL,
 						 NULL);
 		if (err)
-			return err;
+			goto unreg_xdp;
 		dev_info(&vsi->back->pdev->dev,
 			 "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
 			 ring->queue_index);
 	} else {
+		err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+					 ring->queue_index,
+					 ring->q_vector->napi.napi_id,
+					 xdp_frame_sz);
+		if (err)
+			return err;
 		err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 						 MEM_TYPE_PAGE_SHARED,
 						 NULL);
 		if (err)
-			return err;
+			goto unreg_xdp;
 	}
 
 skip:
-	xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
+	xdp_init_buff(&ring->xdp, xdp_frame_sz, &ring->xdp_rxq);
 
 	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
 				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
@@ -3654,7 +3653,8 @@ skip:
 		dev_info(&vsi->back->pdev->dev,
 			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
 			 ring->queue_index, pf_q, err);
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto unreg_xdp;
 	}
 
 	/* set the context in the HMC */
@@ -3663,7 +3663,8 @@ skip:
 		dev_info(&vsi->back->pdev->dev,
 			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
 			 ring->queue_index, pf_q, err);
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto unreg_xdp;
 	}
 
 	/* configure Rx buffer alignment */
@@ -3671,7 +3672,8 @@ skip:
 		if (I40E_2K_TOO_SMALL_WITH_PADDING) {
 			dev_info(&vsi->back->pdev->dev,
 				 "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
-			return -EOPNOTSUPP;
+			err = -EOPNOTSUPP;
+			goto unreg_xdp;
 		}
 		clear_ring_build_skb_enabled(ring);
 	} else {
@@ -3701,6 +3703,11 @@ skip:
 	}
 
 	return 0;
+
+unreg_xdp:
+	if (ring->vsi->type == I40E_VSI_MAIN)
+		xdp_rxq_info_unreg(&ring->xdp_rxq);
+
+	return err;
 }
 
 /**
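[Editor's note: the i40e rework above registers the XDP rxq info only once the needed memory model is known, then funnels every later failure through an unreg_xdp label so the registration cannot leak. A generic sketch of that acquire-then-unwind shape, with all names illustrative:

	#include <stdio.h>

	static int register_rxq(void)    { printf("rxq registered\n"); return 0; }
	static void unregister_rxq(void) { printf("rxq unregistered\n"); }
	static int set_hw_context(int fail) { return fail ? -1 : 0; }

	/* Acquire in order, release in reverse order on failure: every
	 * error after register_rxq() must go through the unwind label.
	 */
	static int configure_ring(int fail_ctx)
	{
		int err = register_rxq();
		if (err)
			return err;

		err = set_hw_context(fail_ctx);
		if (err)
			goto unreg_rxq;

		printf("ring configured\n");
		return 0;

	unreg_rxq:
		unregister_rxq();
		return err;
	}

	int main(void)
	{
		configure_ring(0);
		configure_ring(1);  /* failure path exercises the unwind */
		return 0;
	}
]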
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index 759f3d1c4c8f..dde0ccd789ed 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -88,7 +88,7 @@ TRACE_EVENT(i40e_napi_poll,
 		__entry->rx_clean_complete = rx_clean_complete;
 		__entry->tx_clean_complete = tx_clean_complete;
 		__entry->irq_num = q->irq_num;
-		__entry->curr_cpu = get_cpu();
+		__entry->curr_cpu = smp_processor_id();
 		__assign_str(qname);
 		__assign_str(dev_name);
 		__assign_bitmask(irq_affinity, cpumask_bits(&q->affinity_mask),
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 34db7d8866b0..894f2d06d39d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1470,6 +1470,9 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
 	if (!rx_ring->rx_bi)
 		return;
 
+	if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
+		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+
 	if (rx_ring->xsk_pool) {
 		i40e_xsk_clean_rx_ring(rx_ring);
 		goto skip_free;
@@ -1527,8 +1530,6 @@ skip_free:
 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 {
 	i40e_clean_rx_ring(rx_ring);
-	if (rx_ring->vsi->type == I40E_VSI_MAIN)
-		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 	rx_ring->xdp_prog = NULL;
 	kfree(rx_ring->rx_bi);
 	rx_ring->rx_bi = NULL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index fdf40f8fb239..a26c3d47ec15 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -3833,10 +3833,10 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
 		cfilter.n_proto = ETH_P_IP;
 		if (mask.dst_ip[0] & tcf.dst_ip[0])
 			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
-			       ARRAY_SIZE(tcf.dst_ip));
-		else if (mask.src_ip[0] & tcf.dst_ip[0])
+			       sizeof(cfilter.ip.v4.dst_ip));
+		else if (mask.src_ip[0] & tcf.src_ip[0])
 			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
-			       ARRAY_SIZE(tcf.dst_ip));
+			       sizeof(cfilter.ip.v4.src_ip));
 		break;
 	case VIRTCHNL_TCP_V6_FLOW:
 		cfilter.n_proto = ETH_P_IPV6;
@@ -3891,7 +3891,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
 		/* for ipv6, mask is set for all sixteen bytes (4 words) */
 		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
 			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
-				   sizeof(cfilter.ip.v6.src_ip6)))
+				   sizeof(cfilter.ip.v6.dst_ip6)))
 				continue;
 		if (mask.vlan_id)
 			if (cfilter.vlan_id != cf->vlan_id)
@@ -3979,10 +3979,10 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
 		cfilter->n_proto = ETH_P_IP;
 		if (mask.dst_ip[0] & tcf.dst_ip[0])
 			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
-			       ARRAY_SIZE(tcf.dst_ip));
-		else if (mask.src_ip[0] & tcf.dst_ip[0])
+			       sizeof(cfilter->ip.v4.dst_ip));
+		else if (mask.src_ip[0] & tcf.src_ip[0])
 			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
-			       ARRAY_SIZE(tcf.dst_ip));
+			       sizeof(cfilter->ip.v4.src_ip));
 		break;
 	case VIRTCHNL_TCP_V6_FLOW:
 		cfilter->n_proto = ETH_P_IPV6;
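[Editor's note: the virtchnl fix above replaces ARRAY_SIZE() with sizeof() as a memcpy length. ARRAY_SIZE yields an element count, not a byte count; for a u32[4] it is 4, which only coincidentally equals the 4-byte IPv4 address size and would be badly wrong anywhere else. A minimal demonstration:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	int main(void)
	{
		uint32_t src[4] = { 0x01020304, 0x05060708, 0x090a0b0c, 0x0d0e0f10 };
		uint32_t dst[4] = { 0 };

		/* ARRAY_SIZE is an element count, not a byte count */
		printf("ARRAY_SIZE(src) = %zu, sizeof(src) = %zu\n",
		       ARRAY_SIZE(src), sizeof(src));

		memcpy(dst, src, ARRAY_SIZE(src));  /* copies 4 bytes, not 16 */
		printf("after bad copy:  dst[1] = 0x%08x\n", dst[1]);

		memcpy(dst, src, sizeof(dst));      /* copies the whole field */
		printf("after good copy: dst[1] = 0x%08x\n", dst[1]);
		return 0;
	}
]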
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index a87e0c6d4017..e9fb0a0919e3 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -260,7 +260,6 @@ struct iavf_adapter {
 	struct work_struct adminq_task;
 	struct work_struct finish_config;
 	wait_queue_head_t down_waitqueue;
-	wait_queue_head_t reset_waitqueue;
 	wait_queue_head_t vc_waitqueue;
 	struct iavf_q_vector *q_vectors;
 	struct list_head vlan_filter_list;
@@ -626,5 +625,5 @@ void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
 void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter);
 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
 					const u8 *macaddr);
-int iavf_wait_for_reset(struct iavf_adapter *adapter);
+void iavf_reset_step(struct iavf_adapter *adapter);
 #endif /* _IAVF_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index f3a1b2fb9bf8..1cd1f3f2930a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -313,14 +313,13 @@ static int iavf_get_sset_count(struct net_device *netdev, int sset)
 {
 	/* Report the maximum number queues, even if not every queue is
 	 * currently configured. Since allocation of queues is in pairs,
-	 * use netdev->real_num_tx_queues * 2. The real_num_tx_queues is set
-	 * at device creation and never changes.
+	 * use netdev->num_tx_queues * 2. The num_tx_queues is set at
+	 * device creation and never changes.
 	 */
 
 	if (sset == ETH_SS_STATS)
 		return IAVF_STATS_LEN +
-			(IAVF_QUEUE_STATS_LEN * 2 *
-			 netdev->real_num_tx_queues);
+			(IAVF_QUEUE_STATS_LEN * 2 * netdev->num_tx_queues);
 	else
 		return -EINVAL;
 }
@@ -345,19 +344,19 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
 	iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
 
 	rcu_read_lock();
-	/* As num_active_queues describe both tx and rx queues, we can use
-	 * it to iterate over rings' stats.
+	/* Use num_tx_queues to report stats for the maximum number of queues.
+	 * Queues beyond num_active_queues will report zero.
 	 */
-	for (i = 0; i < adapter->num_active_queues; i++) {
-		struct iavf_ring *ring;
+	for (i = 0; i < netdev->num_tx_queues; i++) {
+		struct iavf_ring *tx_ring = NULL, *rx_ring = NULL;
 
-		/* Tx rings stats */
-		ring = &adapter->tx_rings[i];
-		iavf_add_queue_stats(&data, ring);
+		if (i < adapter->num_active_queues) {
+			tx_ring = &adapter->tx_rings[i];
+			rx_ring = &adapter->rx_rings[i];
+		}
 
-		/* Rx rings stats */
-		ring = &adapter->rx_rings[i];
-		iavf_add_queue_stats(&data, ring);
+		iavf_add_queue_stats(&data, tx_ring);
+		iavf_add_queue_stats(&data, rx_ring);
 	}
 	rcu_read_unlock();
 }
@@ -376,9 +375,9 @@ static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
 	iavf_add_stat_strings(&data, iavf_gstrings_stats);
 
 	/* Queues are always allocated in pairs, so we just use
-	 * real_num_tx_queues for both Tx and Rx queues.
+	 * num_tx_queues for both Tx and Rx queues.
 	 */
-	for (i = 0; i < netdev->real_num_tx_queues; i++) {
+	for (i = 0; i < netdev->num_tx_queues; i++) {
 		iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
 				      "tx", i);
 		iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
@@ -492,7 +491,6 @@ static int iavf_set_ringparam(struct net_device *netdev,
 {
 	struct iavf_adapter *adapter = netdev_priv(netdev);
 	u32 new_rx_count, new_tx_count;
-	int ret = 0;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
@@ -537,13 +535,11 @@ static int iavf_set_ringparam(struct net_device *netdev,
 	}
 
 	if (netif_running(netdev)) {
-		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
-		ret = iavf_wait_for_reset(adapter);
-		if (ret)
-			netdev_warn(netdev, "Changing ring parameters timeout or interrupted waiting for reset");
+		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+		iavf_reset_step(adapter);
 	}
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -1723,7 +1719,6 @@ static int iavf_set_channels(struct net_device *netdev,
 {
 	struct iavf_adapter *adapter = netdev_priv(netdev);
 	u32 num_req = ch->combined_count;
-	int ret = 0;
 
 	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
 	    adapter->num_tc) {
@@ -1745,13 +1740,10 @@ static int iavf_set_channels(struct net_device *netdev,
 	adapter->num_req_queues = num_req;
 	adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
-	iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
-
-	ret = iavf_wait_for_reset(adapter);
-	if (ret)
-		netdev_warn(netdev, "Changing channel count timeout or interrupted waiting for reset");
+	adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+	iavf_reset_step(adapter);
 
-	return ret;
+	return 0;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index bceaf4b1b85d..dad001abc908 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -186,31 +186,6 @@ static bool iavf_is_reset_in_progress(struct iavf_adapter *adapter)
 }
 
 /**
- * iavf_wait_for_reset - Wait for reset to finish.
- * @adapter: board private structure
- *
- * Returns 0 if reset finished successfully, negative on timeout or interrupt.
- */
-int iavf_wait_for_reset(struct iavf_adapter *adapter)
-{
-	int ret = wait_event_interruptible_timeout(adapter->reset_waitqueue,
-					!iavf_is_reset_in_progress(adapter),
-					msecs_to_jiffies(5000));
-
-	/* If ret < 0 then it means wait was interrupted.
-	 * If ret == 0 then it means we got a timeout while waiting
-	 * for reset to finish.
-	 * If ret > 0 it means reset has finished.
-	 */
-	if (ret > 0)
-		return 0;
-	else if (ret < 0)
-		return -EINTR;
-	else
-		return -EBUSY;
-}
-
-/**
  * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
  * @hw: pointer to the HW structure
  * @mem: ptr to mem struct to fill out
@@ -782,10 +757,13 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
 		adapter->num_vlan_filters++;
 		iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
 	} else if (f->state == IAVF_VLAN_REMOVE) {
-		/* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed.
-		 * We can safely only change the state here.
+		/* Re-add the filter since we cannot tell whether the
+		 * pending delete has already been processed by the PF.
+		 * A duplicate add is harmless.
 		 */
-		f->state = IAVF_VLAN_ACTIVE;
+		f->state = IAVF_VLAN_ADD;
+		iavf_schedule_aq_request(adapter,
+					 IAVF_FLAG_AQ_ADD_VLAN_FILTER);
 	}
 
 clearout:
@@ -2793,7 +2771,22 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
 	netdev->watchdog_timeo = 5 * HZ;
 
 	netdev->min_mtu = ETH_MIN_MTU;
-	netdev->max_mtu = LIBIE_MAX_MTU;
+
+	/* PF/VF API: vf_res->max_mtu is max frame size (not MTU).
+	 * Convert to MTU.
+	 */
+	if (!adapter->vf_res->max_mtu) {
+		netdev->max_mtu = LIBIE_MAX_MTU;
+	} else if (adapter->vf_res->max_mtu < LIBETH_RX_LL_LEN + ETH_MIN_MTU ||
+		   adapter->vf_res->max_mtu >
+			LIBETH_RX_LL_LEN + LIBIE_MAX_MTU) {
+		netdev_warn_once(adapter->netdev,
+				 "invalid max frame size %d from PF, using default MTU %d",
+				 adapter->vf_res->max_mtu, LIBIE_MAX_MTU);
+		netdev->max_mtu = LIBIE_MAX_MTU;
+	} else {
+		netdev->max_mtu = adapter->vf_res->max_mtu - LIBETH_RX_LL_LEN;
+	}
 
 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
 		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -3021,6 +3014,8 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
 
 	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
 
+	iavf_ptp_release(adapter);
+
 	/* We don't use netif_running() because it may be true prior to
 	 * ndo_open() returning, so we can't assume it means all our open
 	 * tasks have finished, since we're not holding the rtnl_lock here.
@@ -3096,18 +3091,16 @@ static void iavf_reconfig_qs_bw(struct iavf_adapter *adapter)
 }
 
 /**
- * iavf_reset_task - Call-back task to handle hardware reset
- * @work: pointer to work_struct
+ * iavf_reset_step - Perform the VF reset sequence
+ * @adapter: board private structure
  *
- * During reset we need to shut down and reinitialize the admin queue
- * before we can use it to communicate with the PF again. We also clear
- * and reinit the rings because that context is lost as well.
- **/
-static void iavf_reset_task(struct work_struct *work)
+ * Requests a reset from PF, polls for completion, and reconfigures
+ * the driver. Caller must hold the netdev instance lock.
+ *
+ * This can sleep for several seconds while polling HW registers.
+ */
+void iavf_reset_step(struct iavf_adapter *adapter)
 {
-	struct iavf_adapter *adapter = container_of(work,
-						    struct iavf_adapter,
-						    reset_task);
 	struct virtchnl_vf_resource *vfres = adapter->vf_res;
 	struct net_device *netdev = adapter->netdev;
 	struct iavf_hw *hw = &adapter->hw;
@@ -3118,7 +3111,7 @@ static void iavf_reset_task(struct work_struct *work)
 	int i = 0, err;
 	bool running;
 
-	netdev_lock(netdev);
+	netdev_assert_locked(netdev);
 
 	iavf_misc_irq_disable(adapter);
 	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
@@ -3163,7 +3156,6 @@ static void iavf_reset_task(struct work_struct *work)
 		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
 			reg_val);
 		iavf_disable_vf(adapter);
-		netdev_unlock(netdev);
 		return; /* Do not attempt to reinit. It's dead, Jim. */
 	}
 
@@ -3175,7 +3167,6 @@ continue_reset:
 		iavf_startup(adapter);
 		queue_delayed_work(adapter->wq, &adapter->watchdog_task,
 				   msecs_to_jiffies(30));
-		netdev_unlock(netdev);
 		return;
 	}
 
@@ -3196,6 +3187,8 @@ continue_reset:
 	iavf_change_state(adapter, __IAVF_RESETTING);
 	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
 
+	iavf_ptp_release(adapter);
+
 	/* free the Tx/Rx rings and descriptors, might be better to just
 	 * re-use them sometime in the future
 	 */
@@ -3316,9 +3309,6 @@ continue_reset:
 
 	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
 
-	wake_up(&adapter->reset_waitqueue);
-	netdev_unlock(netdev);
-
 	return;
 reset_err:
 	if (running) {
@@ -3327,10 +3317,21 @@ reset_err:
 	}
 	iavf_disable_vf(adapter);
 
-	netdev_unlock(netdev);
 	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
 }
 
+static void iavf_reset_task(struct work_struct *work)
+{
+	struct iavf_adapter *adapter = container_of(work,
+						    struct iavf_adapter,
+						    reset_task);
+	struct net_device *netdev = adapter->netdev;
+
+	netdev_lock(netdev);
+	iavf_reset_step(adapter);
+	netdev_unlock(netdev);
+}
+
 /**
  * iavf_adminq_task - worker thread to clean the admin queue
  * @work: pointer to work_struct containing our data
@@ -4596,22 +4597,17 @@ static int iavf_close(struct net_device *netdev)
 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct iavf_adapter *adapter = netdev_priv(netdev);
-	int ret = 0;
 
 	netdev_dbg(netdev, "changing MTU from %d to %d\n",
 		   netdev->mtu, new_mtu);
 	WRITE_ONCE(netdev->mtu, new_mtu);
 
 	if (netif_running(netdev)) {
-		iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
-		ret = iavf_wait_for_reset(adapter);
-		if (ret < 0)
-			netdev_warn(netdev, "MTU change interrupted waiting for reset");
-		else if (ret)
-			netdev_warn(netdev, "MTU change timed out waiting for reset");
+		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
+		iavf_reset_step(adapter);
 	}
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -5416,9 +5412,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Setup the wait queue for indicating transition to down status */
 	init_waitqueue_head(&adapter->down_waitqueue);
 
-	/* Setup the wait queue for indicating transition to running state */
-	init_waitqueue_head(&adapter->reset_waitqueue);
-
 	/* Setup the wait queue for indicating virtchannel events */
 	init_waitqueue_head(&adapter->vc_waitqueue);
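[Editor's note: the iavf_init_config_adapter() hunk above converts the PF-reported maximum *frame* size into the L2-payload MTU that netdev->max_mtu expects, by subtracting the link-layer overhead and rejecting out-of-range values. A sketch of the same arithmetic; the constant values here are illustrative stand-ins for the real libeth/libie definitions:

	#include <stdio.h>

	#define ETH_MIN_MTU      68
	#define LIBIE_MAX_MTU    9702  /* illustrative driver MTU ceiling */
	#define LIBETH_RX_LL_LEN 18    /* illustrative: Ethernet header + FCS */

	static int max_mtu_from_frame(unsigned int max_frame)
	{
		if (!max_frame)                       /* PF gave no limit */
			return LIBIE_MAX_MTU;
		if (max_frame < LIBETH_RX_LL_LEN + ETH_MIN_MTU ||
		    max_frame > LIBETH_RX_LL_LEN + LIBIE_MAX_MTU)
			return LIBIE_MAX_MTU;         /* out of range: fall back */
		return max_frame - LIBETH_RX_LL_LEN;  /* frame size -> MTU */
	}

	int main(void)
	{
		printf("%d\n", max_mtu_from_frame(0));     /* default ceiling */
		printf("%d\n", max_mtu_from_frame(1518));  /* classic frame -> 1500 */
		printf("%d\n", max_mtu_from_frame(40));    /* bogus -> ceiling */
		return 0;
	}
]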
wake_up(&adapter->reset_waitqueue);
 		adapter->flags &= ~IAVF_FLAG_QUEUES_DISABLED;
 		break;
 	case VIRTCHNL_OP_DISABLE_QUEUES:
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 6c72bd15db6d..6144cee8034d 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -1360,7 +1360,7 @@ ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
 
 	cdev = pf->cdev_info;
 	if (!cdev)
-		return -ENODEV;
+		return -EOPNOTSUPP;
 
 	ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2);
 
@@ -1427,7 +1427,7 @@ ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
 
 	cdev = pf->cdev_info;
 	if (!cdev)
-		return -ENODEV;
+		return -EOPNOTSUPP;
 
 	ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP);
 
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index def7efa15447..eb3a48330cc1 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -840,6 +840,28 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
 }
 
 /**
+ * ice_get_max_txq - return the maximum number of Tx queues in a PF
+ * @pf: PF structure
+ *
+ * Return: maximum number of Tx queues
+ */
+static inline int ice_get_max_txq(struct ice_pf *pf)
+{
+	return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq);
+}
+
+/**
+ * ice_get_max_rxq - return the maximum number of Rx queues in a PF
+ * @pf: PF structure
+ *
+ * Return: maximum number of Rx queues
+ */
+static inline int ice_get_max_rxq(struct ice_pf *pf)
+{
+	return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq);
+}
+
+/**
  * ice_get_main_vsi - Get the PF VSI
  * @pf: PF instance
 *
@@ -987,6 +1009,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
 int ice_plug_aux_dev(struct ice_pf *pf);
 void ice_unplug_aux_dev(struct ice_pf *pf);
+void ice_rdma_finalize_setup(struct ice_pf *pf);
 int ice_init_rdma(struct ice_pf *pf);
 void ice_deinit_rdma(struct ice_pf *pf);
 bool ice_is_wol_supported(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index eb77dfa934aa..1667f686ff75 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -124,6 +124,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
 	if (vsi->type == ICE_VSI_VF) {
 		ice_calc_vf_reg_idx(vsi->vf, q_vector);
 		goto out;
+	} else if (vsi->type == ICE_VSI_LB) {
+		goto skip_alloc;
 	} else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
 		struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);
 
@@ -659,33 +661,22 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
 {
 	struct device *dev = ice_pf_to_dev(ring->vsi->back);
 	u32 num_bufs = ICE_DESC_UNUSED(ring);
-	u32 rx_buf_len;
 	int err;
 
-	if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
-		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
-			err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
-						 ring->q_index,
-						 ring->q_vector->napi.napi_id,
-						 ring->rx_buf_len);
-			if (err)
-				return err;
-		}
-
+	if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF ||
+	    ring->vsi->type == ICE_VSI_LB) {
 		ice_rx_xsk_pool(ring);
 		err = ice_realloc_rx_xdp_bufs(ring, ring->xsk_pool);
 		if (err)
 			return err;
 
 		if (ring->xsk_pool) {
-			xdp_rxq_info_unreg(&ring->xdp_rxq);
-
-			rx_buf_len =
-				xsk_pool_get_rx_frame_size(ring->xsk_pool);
+			u32 frag_size =
xsk_pool_get_rx_frag_step(ring->xsk_pool); err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->q_index, ring->q_vector->napi.napi_id, - rx_buf_len); + frag_size); if (err) return err; err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, @@ -702,14 +693,13 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) if (err) return err; - if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) { - err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, - ring->q_index, - ring->q_vector->napi.napi_id, - ring->rx_buf_len); - if (err) - goto err_destroy_fq; - } + err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, + ring->q_index, + ring->q_vector->napi.napi_id, + ring->truesize); + if (err) + goto err_destroy_fq; + xdp_rxq_info_attach_page_pool(&ring->xdp_rxq, ring->pp); } diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 6cd63190f55d..ce11fea122d0 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1816,6 +1816,7 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode) case ice_aqc_opc_lldp_stop: case ice_aqc_opc_lldp_start: case ice_aqc_opc_lldp_filter_ctrl: + case ice_aqc_opc_sff_eeprom: return true; } @@ -1841,6 +1842,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, { struct libie_aq_desc desc_cpy; bool is_cmd_for_retry; + u8 *buf_cpy = NULL; u8 idx = 0; u16 opcode; int status; @@ -1850,8 +1852,11 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, memset(&desc_cpy, 0, sizeof(desc_cpy)); if (is_cmd_for_retry) { - /* All retryable cmds are direct, without buf. */ - WARN_ON(buf); + if (buf) { + buf_cpy = kmemdup(buf, buf_size, GFP_KERNEL); + if (!buf_cpy) + return -ENOMEM; + } memcpy(&desc_cpy, desc, sizeof(desc_cpy)); } @@ -1863,12 +1868,14 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, hw->adminq.sq_last_status != LIBIE_AQ_RC_EBUSY) break; + if (buf_cpy) + memcpy(buf, buf_cpy, buf_size); memcpy(desc, &desc_cpy, sizeof(desc_cpy)); - msleep(ICE_SQ_SEND_DELAY_TIME_MS); } while (++idx < ICE_SQ_SEND_MAX_EXECUTE); + kfree(buf_cpy); return status; } @@ -6391,7 +6398,7 @@ int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add) struct ice_aqc_lldp_filter_ctrl *cmd; struct libie_aq_desc desc; - if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw)) + if (!ice_fw_supports_lldp_fltr_ctrl(hw)) return -EOPNOTSUPP; cmd = libie_aq_raw(&desc); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 0ea64b330614..e6a20af6f63d 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1289,6 +1289,10 @@ static u64 ice_loopback_test(struct net_device *netdev) test_vsi->netdev = netdev; tx_ring = test_vsi->tx_rings[0]; rx_ring = test_vsi->rx_rings[0]; + /* Dummy q_vector and napi. Fill the minimum required for + * ice_rxq_pp_create(). + */ + rx_ring->q_vector->napi.dev = netdev; if (ice_lbtest_prepare_rings(test_vsi)) { ret = 2; @@ -1926,6 +1930,17 @@ __ice_get_ethtool_stats(struct net_device *netdev, int i = 0; char *p; + if (ice_is_port_repr_netdev(netdev)) { + ice_update_eth_stats(vsi); + + for (j = 0; j < ICE_VSI_STATS_LEN; j++) { + p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset; + data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat == + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; + } + return; + } + ice_update_pf_stats(pf); ice_update_vsi_stats(vsi); @@ -1935,9 +1950,6 @@ __ice_get_ethtool_stats(struct net_device *netdev, sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - if (ice_is_port_repr_netdev(netdev)) - return; - /* populate per queue stats */ rcu_read_lock(); @@ -3328,7 +3340,7 @@ process_rx: rx_rings = kzalloc_objs(*rx_rings, vsi->num_rxq); if (!rx_rings) { err = -ENOMEM; - goto done; + goto free_xdp; } ice_for_each_rxq(vsi, i) { @@ -3338,6 +3350,7 @@ process_rx: rx_rings[i].cached_phctime = pf->ptp.cached_phc_time; rx_rings[i].desc = NULL; rx_rings[i].xdp_buf = NULL; + rx_rings[i].xdp_rxq = (struct xdp_rxq_info){ }; /* this is to allow wr32 to have something to write to * during early allocation of Rx buffers @@ -3355,7 +3368,7 @@ rx_unwind: } kfree(rx_rings); err = -ENOMEM; - goto free_tx; + goto free_xdp; } } @@ -3407,6 +3420,13 @@ process_link: } goto done; +free_xdp: + if (xdp_rings) { + ice_for_each_xdp_txq(vsi, i) + ice_free_tx_ring(&xdp_rings[i]); + kfree(xdp_rings); + } + free_tx: /* error cleanup if the Rx allocations failed after getting Tx */ if (tx_rings) { @@ -3762,24 +3782,6 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info) } /** - * ice_get_max_txq - return the maximum number of Tx queues for in a PF - * @pf: PF structure - */ -static int ice_get_max_txq(struct ice_pf *pf) -{ - return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq); -} - -/** - * ice_get_max_rxq - return the maximum number of Rx queues for in a PF - * @pf: PF structure - */ -static int ice_get_max_rxq(struct ice_pf *pf) -{ - return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq); -} - -/** * ice_get_combined_cnt - return the current number of combined channels * @vsi: PF VSI pointer * @@ -4505,7 +4507,7 @@ ice_get_module_eeprom(struct net_device *netdev, u8 addr = ICE_I2C_EEPROM_DEV_ADDR; struct ice_hw *hw = &pf->hw; bool is_sfp = false; - unsigned int i, j; + unsigned int i; u16 offset = 0; u8 page = 0; int status; @@ -4547,26 +4549,19 @@ ice_get_module_eeprom(struct net_device *netdev, if (page == 0 || !(data[0x2] & 0x4)) { u32 copy_len; - /* If i2c bus is busy due to slow page change or - * link management access, call can fail. This is normal. - * So we retry this a few times. 
- */ - for (j = 0; j < 4; j++) { - status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, - !is_sfp, value, - SFF_READ_BLOCK_SIZE, - 0, NULL); - netdev_dbg(netdev, "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%X)\n", - addr, offset, page, is_sfp, - value[0], value[1], value[2], value[3], - value[4], value[5], value[6], value[7], - status); - if (status) { - usleep_range(1500, 2500); - memset(value, 0, SFF_READ_BLOCK_SIZE); - continue; - } - break; + status = ice_aq_sff_eeprom(hw, 0, addr, offset, page, + !is_sfp, value, + SFF_READ_BLOCK_SIZE, + 0, NULL); + netdev_dbg(netdev, "SFF %02X %02X %02X %X = %02X%02X%02X%02X.%02X%02X%02X%02X (%pe)\n", + addr, offset, page, is_sfp, + value[0], value[1], value[2], value[3], + value[4], value[5], value[6], value[7], + ERR_PTR(status)); + if (status) { + netdev_err(netdev, "%s: error reading module EEPROM: status %pe\n", + __func__, ERR_PTR(status)); + return status; } /* Make sure we have enough room for the new block */ diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index 3572b0e1d49f..102d63c3018b 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -361,6 +361,39 @@ void ice_unplug_aux_dev(struct ice_pf *pf) } /** + * ice_rdma_finalize_setup - Complete RDMA setup after VSI is ready + * @pf: ptr to ice_pf + * + * Sets VSI-dependent information and plugs aux device. + * Must be called after ice_init_rdma(), ice_vsi_rebuild(), and + * ice_dcb_rebuild() complete. + */ +void ice_rdma_finalize_setup(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct iidc_rdma_priv_dev_info *privd; + int ret; + + if (!ice_is_rdma_ena(pf) || !pf->cdev_info) + return; + + privd = pf->cdev_info->iidc_priv; + if (!privd || !pf->vsi || !pf->vsi[0] || !pf->vsi[0]->netdev) + return; + + /* Assign VSI info now that VSI is valid */ + privd->netdev = pf->vsi[0]->netdev; + privd->vport_id = pf->vsi[0]->vsi_num; + + /* Update QoS info after DCB has been rebuilt */ + ice_setup_dcb_qos_info(pf, &privd->qos_info); + + ret = ice_plug_aux_dev(pf); + if (ret) + dev_warn(dev, "Failed to plug RDMA aux device: %d\n", ret); +} + +/** * ice_init_rdma - initializes PF for RDMA use * @pf: ptr to ice_pf */ @@ -398,22 +431,14 @@ int ice_init_rdma(struct ice_pf *pf) } cdev->iidc_priv = privd; - privd->netdev = pf->vsi[0]->netdev; privd->hw_addr = (u8 __iomem *)pf->hw.hw_addr; cdev->pdev = pf->pdev; - privd->vport_id = pf->vsi[0]->vsi_num; pf->cdev_info->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2; - ice_setup_dcb_qos_info(pf, &privd->qos_info); - ret = ice_plug_aux_dev(pf); - if (ret) - goto err_plug_aux_dev; + return 0; -err_plug_aux_dev: - pf->cdev_info->adev = NULL; - xa_erase(&ice_aux_id, pf->aux_idx); err_alloc_xa: kfree(privd); err_privd_alloc: @@ -432,7 +457,6 @@ void ice_deinit_rdma(struct ice_pf *pf) if (!ice_is_rdma_ena(pf)) return; - ice_unplug_aux_dev(pf); xa_erase(&ice_aux_id, pf->aux_idx); kfree(pf->cdev_info->iidc_priv); kfree(pf->cdev_info); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index f91f8b672b02..689c6025ea82 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -107,10 +107,6 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->rxq_map) goto err_rxq_map; - /* There is no need to allocate q_vectors for a loopback VSI. 
*/ - if (vsi->type == ICE_VSI_LB) - return 0; - /* allocate memory for q_vector pointers */ vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors, sizeof(*vsi->q_vectors), GFP_KERNEL); @@ -241,6 +237,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) case ICE_VSI_LB: vsi->alloc_txq = 1; vsi->alloc_rxq = 1; + /* A dummy q_vector, no actual IRQ. */ + vsi->num_q_vectors = 1; break; default: dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type); @@ -2426,14 +2424,21 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi) } break; case ICE_VSI_LB: - ret = ice_vsi_alloc_rings(vsi); + ret = ice_vsi_alloc_q_vectors(vsi); if (ret) goto unroll_vsi_init; + ret = ice_vsi_alloc_rings(vsi); + if (ret) + goto unroll_alloc_q_vector; + ret = ice_vsi_alloc_ring_stats(vsi); if (ret) goto unroll_vector_base; + /* Simply map the dummy q_vector to the only rx_ring */ + vsi->rx_rings[0]->q_vector = vsi->q_vectors[0]; + break; default: /* clean up the resources and exit */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index ebf48feffb30..3c36e3641b9e 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4699,8 +4699,8 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) struct net_device *netdev; u8 mac_addr[ETH_ALEN]; - netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, - vsi->alloc_rxq); + netdev = alloc_etherdev_mqs(sizeof(*np), ice_get_max_txq(vsi->back), + ice_get_max_rxq(vsi->back)); if (!netdev) return -ENOMEM; @@ -5138,6 +5138,9 @@ int ice_load(struct ice_pf *pf) if (err) goto err_init_rdma; + /* Finalize RDMA: VSI already created, assign info and plug device */ + ice_rdma_finalize_setup(pf); + ice_service_task_restart(pf); clear_bit(ICE_DOWN, pf->state); @@ -5169,6 +5172,7 @@ void ice_unload(struct ice_pf *pf) devl_assert_locked(priv_to_devlink(pf)); + ice_unplug_aux_dev(pf); ice_deinit_rdma(pf); ice_deinit_features(pf); ice_tc_indir_block_unregister(vsi); @@ -5595,6 +5599,7 @@ static int ice_suspend(struct device *dev) */ disabled = ice_service_task_stop(pf); + ice_unplug_aux_dev(pf); ice_deinit_rdma(pf); /* Already suspended?, then there is nothing to do */ @@ -7859,7 +7864,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_health_clear(pf); - ice_plug_aux_dev(pf); + ice_rdma_finalize_setup(pf); if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) ice_lag_rebuild(pf); diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index 90f99443a922..096566c697f4 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -2,6 +2,7 @@ /* Copyright (C) 2019-2021, Intel Corporation. 
*/ #include "ice.h" +#include "ice_lib.h" #include "ice_eswitch.h" #include "devlink/devlink.h" #include "devlink/port.h" @@ -67,7 +68,7 @@ ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) return; vsi = repr->src_vsi; - ice_update_vsi_stats(vsi); + ice_update_eth_stats(vsi); eth_stats = &vsi->eth_stats; stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast + @@ -315,7 +316,7 @@ ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops) static int ice_repr_ready_vf(struct ice_repr *repr) { - return !ice_check_vf_ready_for_cfg(repr->vf); + return ice_check_vf_ready_for_cfg(repr->vf); } static int ice_repr_ready_sf(struct ice_repr *repr) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index a5bbce68f76c..a2cd4cf37734 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -560,7 +560,9 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring) i = 0; } - if (rx_ring->vsi->type == ICE_VSI_PF && + if ((rx_ring->vsi->type == ICE_VSI_PF || + rx_ring->vsi->type == ICE_VSI_SF || + rx_ring->vsi->type == ICE_VSI_LB) && xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) { xdp_rxq_info_detach_mem_model(&rx_ring->xdp_rxq); xdp_rxq_info_unreg(&rx_ring->xdp_rxq); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index c673094663a3..0643017541c3 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -899,6 +899,9 @@ void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) u16 ntc = rx_ring->next_to_clean; u16 ntu = rx_ring->next_to_use; + if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); + while (ntc != ntu) { struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc); diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index b206fba092c8..ec1b75f039bb 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -1066,7 +1066,7 @@ bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val); int idpf_idc_init(struct idpf_adapter *adapter); int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter, enum iidc_function_type ftype); -void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info); +void idpf_idc_deinit_core_aux_device(struct idpf_adapter *adapter); void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info); void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info); void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info, diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 40e08a71d2d3..bb99d9e7c65d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -307,9 +307,6 @@ static int idpf_del_flow_steer(struct net_device *netdev, vport_config = vport->adapter->vport_config[np->vport_idx]; user_config = &vport_config->user_config; - if (!idpf_sideband_action_ena(vport, fsp)) - return -EOPNOTSUPP; - rule = kzalloc_flex(*rule, rule_info, 1); if (!rule) return -ENOMEM; diff --git a/drivers/net/ethernet/intel/idpf/idpf_idc.c b/drivers/net/ethernet/intel/idpf/idpf_idc.c index bd4785fb8d3e..7e4f4ac92653 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_idc.c +++ b/drivers/net/ethernet/intel/idpf/idpf_idc.c @@ -470,10 +470,11 @@ err_privd_alloc: /** * 
idpf_idc_deinit_core_aux_device - de-initialize Auxiliary Device(s) - * @cdev_info: IDC core device info pointer + * @adapter: driver private data structure */ -void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info) +void idpf_idc_deinit_core_aux_device(struct idpf_adapter *adapter) { + struct iidc_rdma_core_dev_info *cdev_info = adapter->cdev_info; struct iidc_rdma_priv_dev_info *privd; if (!cdev_info) @@ -485,6 +486,7 @@ void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info) kfree(privd->mapped_mem_regions); kfree(privd); kfree(cdev_info); + adapter->cdev_info = NULL; } /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index de3df705a7e6..cf966fe6c759 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -1318,6 +1318,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, free_rss_key: kfree(rss_data->rss_key); + rss_data->rss_key = NULL; free_qreg_chunks: idpf_vport_deinit_queue_reg_chunks(adapter->vport_config[idx]); free_vector_idxs: diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 05a162094d10..f6b3b15364ff 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -1314,6 +1314,9 @@ static void idpf_txq_group_rel(struct idpf_q_vec_rsrc *rsrc) struct idpf_txq_group *txq_grp = &rsrc->txq_grps[i]; for (unsigned int j = 0; j < txq_grp->num_txq; j++) { + if (!txq_grp->txqs[j]) + continue; + if (idpf_queue_has(FLOW_SCH_EN, txq_grp->txqs[j])) { kfree(txq_grp->txqs[j]->refillq); txq_grp->txqs[j]->refillq = NULL; @@ -1339,6 +1342,9 @@ static void idpf_txq_group_rel(struct idpf_q_vec_rsrc *rsrc) */ static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp) { + if (!rx_qgrp->splitq.bufq_sets) + return; + for (unsigned int i = 0; i < rx_qgrp->splitq.num_bufq_sets; i++) { struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i]; @@ -1854,13 +1860,13 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, idpf_queue_assign(HSPLIT_EN, q, hs); idpf_queue_assign(RSC_EN, q, rsc); - bufq_set->num_refillqs = num_rxq; bufq_set->refillqs = kcalloc(num_rxq, swq_size, GFP_KERNEL); if (!bufq_set->refillqs) { err = -ENOMEM; goto err_alloc; } + bufq_set->num_refillqs = num_rxq; for (unsigned int k = 0; k < bufq_set->num_refillqs; k++) { struct idpf_sw_queue *refillq = &bufq_set->refillqs[k]; @@ -2336,7 +2342,7 @@ void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq) do { struct idpf_splitq_4b_tx_compl_desc *tx_desc; - struct idpf_tx_queue *target; + struct idpf_tx_queue *target = NULL; u32 ctype_gen, id; tx_desc = flow ? 
&complq->comp[ntc].common : @@ -2356,14 +2362,14 @@ void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq) target = complq->txq_grp->txqs[id]; idpf_queue_clear(SW_MARKER, target); - if (target == txq) - break; next: if (unlikely(++ntc == complq->desc_count)) { ntc = 0; gen_flag = !gen_flag; } + if (target == txq) + break; } while (time_before(jiffies, timeout)); idpf_queue_assign(GEN_CHK, complq, gen_flag); @@ -4059,7 +4065,7 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, continue; name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name, - vec_name, vidx); + vec_name, vector); err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0, name, q_vector); diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index d5a877e1fef8..113ecfc16dd7 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -3668,7 +3668,7 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter) idpf_ptp_release(adapter); idpf_deinit_task(adapter); - idpf_idc_deinit_core_aux_device(adapter->cdev_info); + idpf_idc_deinit_core_aux_device(adapter); idpf_rel_rx_pt_lkup(adapter); idpf_intr_rel(adapter); diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c index 6ac9c6624c2a..cbccd4546768 100644 --- a/drivers/net/ethernet/intel/idpf/xdp.c +++ b/drivers/net/ethernet/intel/idpf/xdp.c @@ -47,12 +47,16 @@ static int __idpf_xdp_rxq_info_init(struct idpf_rx_queue *rxq, void *arg) { const struct idpf_vport *vport = rxq->q_vector->vport; const struct idpf_q_vec_rsrc *rsrc; + u32 frag_size = 0; bool split; int err; + if (idpf_queue_has(XSK, rxq)) + frag_size = rxq->bufq_sets[0].bufq.truesize; + err = __xdp_rxq_info_reg(&rxq->xdp_rxq, vport->netdev, rxq->idx, rxq->q_vector->napi.napi_id, - rxq->rx_buf_size); + frag_size); if (err) return err; diff --git a/drivers/net/ethernet/intel/idpf/xsk.c b/drivers/net/ethernet/intel/idpf/xsk.c index 676cbd80774d..d95d3efdfd36 100644 --- a/drivers/net/ethernet/intel/idpf/xsk.c +++ b/drivers/net/ethernet/intel/idpf/xsk.c @@ -403,6 +403,7 @@ int idpf_xskfq_init(struct idpf_buf_queue *bufq) bufq->pending = fq.pending; bufq->thresh = fq.thresh; bufq->rx_buf_size = fq.buf_len; + bufq->truesize = fq.truesize; if (!idpf_xskfq_refill(bufq)) netdev_err(bufq->pool->netdev, diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c index 30ce5fbb5b77..ce4a7b58cad2 100644 --- a/drivers/net/ethernet/intel/igb/igb_xsk.c +++ b/drivers/net/ethernet/intel/igb/igb_xsk.c @@ -524,6 +524,16 @@ bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool) return nb_pkts < budget; } +static u32 igb_sw_irq_prep(struct igb_q_vector *q_vector) +{ + u32 eics = 0; + + if (!napi_if_scheduled_mark_missed(&q_vector->napi)) + eics = q_vector->eims_value; + + return eics; +} + int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) { struct igb_adapter *adapter = netdev_priv(dev); @@ -542,20 +552,32 @@ int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) ring = adapter->tx_ring[qid]; - if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags)) - return -ENETDOWN; - if (!READ_ONCE(ring->xsk_pool)) return -EINVAL; - if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) { + if (flags & XDP_WAKEUP_TX) { + if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags)) + return -ENETDOWN; + + eics |= igb_sw_irq_prep(ring->q_vector); + } + + if (flags & XDP_WAKEUP_RX) { + /* If 
IGB_FLAG_QUEUE_PAIRS is active, the q_vector
+		 * and NAPI are shared between RX and TX.
+		 * If NAPI is already running it would be marked as missed
+		 * from the TX path, making this RX call a NOP.
+		 */
+		ring = adapter->rx_ring[qid];
+		eics |= igb_sw_irq_prep(ring->q_vector);
+	}
+
+	if (eics) {
 		/* Cause software interrupt */
-		if (adapter->flags & IGB_FLAG_HAS_MSIX) {
-			eics |= ring->q_vector->eims_value;
+		if (adapter->flags & IGB_FLAG_HAS_MSIX)
 			wr32(E1000_EICS, eics);
-		} else {
+		else
 			wr32(E1000_ICS, E1000_ICS_RXDMT0);
-		}
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index a427f05814c1..17236813965d 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -781,6 +781,8 @@ int igc_ptp_hwtstamp_set(struct net_device *netdev,
 			 struct kernel_hwtstamp_config *config,
 			 struct netlink_ext_ack *extack);
 void igc_ptp_tx_hang(struct igc_adapter *adapter);
+void igc_ptp_clear_xsk_tx_tstamp_queue(struct igc_adapter *adapter,
+				       u16 queue_id);
 void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
 void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 27e5c2109138..72bc5128d8b8 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -264,6 +264,13 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
 	/* reset next_to_use and next_to_clean */
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
+
+	/* Clear any lingering XSK TX timestamp requests */
+	if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) {
+		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
+
+		igc_ptp_clear_xsk_tx_tstamp_queue(adapter, tx_ring->queue_index);
+	}
 }
 
 /**
@@ -1730,11 +1737,8 @@ static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
 	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
 	 * in order to meet this minimum size requirement.
 	 */
-	if (skb->len < 17) {
-		if (skb_padto(skb, 17))
-			return NETDEV_TX_OK;
-		skb->len = 17;
-	}
+	if (skb_put_padto(skb, 17))
+		return NETDEV_TX_OK;
 
 	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
 }
@@ -6906,28 +6910,29 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
 	return nxmit;
 }
 
-static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
-					struct igc_q_vector *q_vector)
+static u32 igc_sw_irq_prep(struct igc_q_vector *q_vector)
 {
-	struct igc_hw *hw = &adapter->hw;
 	u32 eics = 0;
 
-	eics |= q_vector->eims_value;
-	wr32(IGC_EICS, eics);
+	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
+		eics = q_vector->eims_value;
+
+	return eics;
 }
 
 int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 {
 	struct igc_adapter *adapter = netdev_priv(dev);
-	struct igc_q_vector *q_vector;
+	struct igc_hw *hw = &adapter->hw;
 	struct igc_ring *ring;
+	u32 eics = 0;
 
 	if (test_bit(__IGC_DOWN, &adapter->state))
 		return -ENETDOWN;
 
 	if (!igc_xdp_is_enabled(adapter))
 		return -ENXIO;
-
+	/* Check if queue_id is valid.
Tx and Rx queue numbers are always the same */
 	if (queue_id >= adapter->num_rx_queues)
 		return -EINVAL;
@@ -6936,9 +6941,22 @@ int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
 	if (!ring->xsk_pool)
 		return -ENXIO;
 
-	q_vector = adapter->q_vector[queue_id];
-	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
-		igc_trigger_rxtxq_interrupt(adapter, q_vector);
+	if (flags & XDP_WAKEUP_RX)
+		eics |= igc_sw_irq_prep(ring->q_vector);
+
+	if (flags & XDP_WAKEUP_TX) {
+		/* If IGC_FLAG_QUEUE_PAIRS is active, the q_vector
+		 * and NAPI are shared between RX and TX.
+		 * If NAPI is already running it would be marked as missed
+		 * from the RX path, making this TX call a NOP.
+		 */
+		ring = adapter->tx_ring[queue_id];
+		eics |= igc_sw_irq_prep(ring->q_vector);
+	}
+
+	if (eics)
+		/* Cause software interrupt */
+		wr32(IGC_EICS, eics);
 
 	return 0;
 }
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 7aae83c108fd..3d6b2264164a 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -550,7 +550,8 @@ static void igc_ptp_free_tx_buffer(struct igc_adapter *adapter,
 	tstamp->buffer_type = 0;
 
 	/* Trigger txrx interrupt for transmit completion */
-	igc_xsk_wakeup(adapter->netdev, tstamp->xsk_queue_index, 0);
+	igc_xsk_wakeup(adapter->netdev, tstamp->xsk_queue_index,
+		       XDP_WAKEUP_TX);
 
 	return;
 }
@@ -576,6 +577,39 @@ static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter)
 	spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
 }
 
+/**
+ * igc_ptp_clear_xsk_tx_tstamp_queue - Clear pending XSK TX timestamps for a queue
+ * @adapter: Board private structure
+ * @queue_id: TX queue index to clear timestamps for
+ *
+ * Iterates over all TX timestamp registers and releases any pending
+ * timestamp requests associated with the given TX queue. This is
+ * called when an XDP pool is being disabled to ensure no stale
+ * timestamp references remain.
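+ *
+ * Context: acquires adapter->ptp_tx_lock, so it is safe to call from
+ * ring teardown paths such as igc_clean_tx_ring().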
+ */ +void igc_ptp_clear_xsk_tx_tstamp_queue(struct igc_adapter *adapter, u16 queue_id) +{ + unsigned long flags; + int i; + + spin_lock_irqsave(&adapter->ptp_tx_lock, flags); + + for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) { + struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; + + if (tstamp->buffer_type != IGC_TX_BUFFER_TYPE_XSK) + continue; + if (tstamp->xsk_queue_index != queue_id) + continue; + if (!tstamp->xsk_tx_buffer) + continue; + + igc_ptp_free_tx_buffer(adapter, tstamp); + } + + spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); +} + static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 74d320879513..b67b580f7f1c 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -852,7 +852,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, if (!mac->get_link_status) goto out; - if (hw->mac.type == ixgbe_mac_e610_vf) { + if (hw->mac.type == ixgbe_mac_e610_vf && + hw->api_version >= ixgbe_mbox_api_16) { ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up); if (ret_val) goto out; diff --git a/drivers/net/ethernet/intel/libeth/xsk.c b/drivers/net/ethernet/intel/libeth/xsk.c index 846e902e31b6..4882951d5c9c 100644 --- a/drivers/net/ethernet/intel/libeth/xsk.c +++ b/drivers/net/ethernet/intel/libeth/xsk.c @@ -167,6 +167,7 @@ int libeth_xskfq_create(struct libeth_xskfq *fq) fq->pending = fq->count; fq->thresh = libeth_xdp_queue_threshold(fq->count); fq->buf_len = xsk_pool_get_rx_frame_size(fq->pool); + fq->truesize = xsk_pool_get_rx_frag_step(fq->pool); return 0; } diff --git a/drivers/net/ethernet/intel/libie/fwlog.c b/drivers/net/ethernet/intel/libie/fwlog.c index 79020d990859..96bba57c8a5b 100644 --- a/drivers/net/ethernet/intel/libie/fwlog.c +++ b/drivers/net/ethernet/intel/libie/fwlog.c @@ -433,17 +433,21 @@ libie_debugfs_module_write(struct file *filp, const char __user *buf, module = libie_find_module_by_dentry(fwlog->debugfs_modules, dentry); if (module < 0) { dev_info(dev, "unknown module\n"); - return -EINVAL; + count = -EINVAL; + goto free_cmd_buf; } cnt = sscanf(cmd_buf, "%s", user_val); - if (cnt != 1) - return -EINVAL; + if (cnt != 1) { + count = -EINVAL; + goto free_cmd_buf; + } log_level = sysfs_match_string(libie_fwlog_level_string, user_val); if (log_level < 0) { dev_info(dev, "unknown log level '%s'\n", user_val); - return -EINVAL; + count = -EINVAL; + goto free_cmd_buf; } if (module != LIBIE_AQC_FW_LOG_ID_MAX) { @@ -458,6 +462,9 @@ libie_debugfs_module_write(struct file *filp, const char __user *buf, fwlog->cfg.module_entries[i].log_level = log_level; } +free_cmd_buf: + kfree(cmd_buf); + return count; } @@ -515,23 +522,31 @@ libie_debugfs_nr_messages_write(struct file *filp, const char __user *buf, return PTR_ERR(cmd_buf); ret = sscanf(cmd_buf, "%s", user_val); - if (ret != 1) - return -EINVAL; + if (ret != 1) { + count = -EINVAL; + goto free_cmd_buf; + } ret = kstrtos16(user_val, 0, &nr_messages); - if (ret) - return ret; + if (ret) { + count = ret; + goto free_cmd_buf; + } if (nr_messages < LIBIE_AQC_FW_LOG_MIN_RESOLUTION || nr_messages > LIBIE_AQC_FW_LOG_MAX_RESOLUTION) { dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n", nr_messages, LIBIE_AQC_FW_LOG_MIN_RESOLUTION, LIBIE_AQC_FW_LOG_MAX_RESOLUTION); - return -EINVAL; + count = -EINVAL; + goto free_cmd_buf; } fwlog->cfg.log_resolution = nr_messages; 
+free_cmd_buf: + kfree(cmd_buf); + return count; } @@ -588,8 +603,10 @@ libie_debugfs_enable_write(struct file *filp, const char __user *buf, return PTR_ERR(cmd_buf); ret = sscanf(cmd_buf, "%s", user_val); - if (ret != 1) - return -EINVAL; + if (ret != 1) { + ret = -EINVAL; + goto free_cmd_buf; + } ret = kstrtobool(user_val, &enable); if (ret) @@ -624,6 +641,8 @@ enable_write_error: */ if (WARN_ON(ret != (ssize_t)count && ret >= 0)) ret = -EIO; +free_cmd_buf: + kfree(cmd_buf); return ret; } @@ -682,8 +701,10 @@ libie_debugfs_log_size_write(struct file *filp, const char __user *buf, return PTR_ERR(cmd_buf); ret = sscanf(cmd_buf, "%s", user_val); - if (ret != 1) - return -EINVAL; + if (ret != 1) { + ret = -EINVAL; + goto free_cmd_buf; + } index = sysfs_match_string(libie_fwlog_log_size, user_val); if (index < 0) { @@ -712,6 +733,8 @@ log_size_write_error: */ if (WARN_ON(ret != (ssize_t)count && ret >= 0)) ret = -EIO; +free_cmd_buf: + kfree(cmd_buf); return ret; } @@ -1049,6 +1072,10 @@ void libie_fwlog_deinit(struct libie_fwlog *fwlog) { int status; + /* if FW logging isn't supported it means no configuration was done */ + if (!libie_fwlog_supported(fwlog)) + return; + /* make sure FW logging is disabled to not put the FW in a weird state * for the next driver load */ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index d1b8650cb4b4..f442b874bb59 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5016,7 +5016,7 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu) if (priv->percpu_pools) numbufs = port->nrxqs * 2; - if (change_percpu) + if (change_percpu && priv->global_tx_fc) mvpp2_bm_pool_update_priv_fc(priv, false); for (i = 0; i < numbufs; i++) @@ -5041,7 +5041,7 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu) mvpp2_open(port->dev); } - if (change_percpu) + if (change_percpu && priv->global_tx_fc) mvpp2_bm_pool_update_priv_fc(priv, true); return 0; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index ec55eb2a6c04..028dd55604ea 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -554,28 +554,43 @@ static void octep_clean_irqs(struct octep_device *oct) } /** - * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue. + * octep_update_pkt() - Update IQ/OQ IN/OUT_CNT registers. * * @iq: Octeon Tx queue data structure. * @oq: Octeon Rx queue data structure. 
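+ *
+ * Writes the processed/pending deltas to the IN/OUT_CNT registers and
+ * reads the registers back so the writes are posted before the RESEND
+ * bits are set in octep_enable_ioq_irq().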
*/ -static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq) +static void octep_update_pkt(struct octep_iq *iq, struct octep_oq *oq) { - u32 pkts_pend = oq->pkts_pending; + u32 pkts_pend = READ_ONCE(oq->pkts_pending); + u32 last_pkt_count = READ_ONCE(oq->last_pkt_count); + u32 pkts_processed = READ_ONCE(iq->pkts_processed); + u32 pkt_in_done = READ_ONCE(iq->pkt_in_done); netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no); - if (iq->pkts_processed) { - writel(iq->pkts_processed, iq->inst_cnt_reg); - iq->pkt_in_done -= iq->pkts_processed; - iq->pkts_processed = 0; + if (pkts_processed) { + writel(pkts_processed, iq->inst_cnt_reg); + readl(iq->inst_cnt_reg); + WRITE_ONCE(iq->pkt_in_done, (pkt_in_done - pkts_processed)); + WRITE_ONCE(iq->pkts_processed, 0); } - if (oq->last_pkt_count - pkts_pend) { - writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg); - oq->last_pkt_count = pkts_pend; + if (last_pkt_count - pkts_pend) { + writel(last_pkt_count - pkts_pend, oq->pkts_sent_reg); + readl(oq->pkts_sent_reg); + WRITE_ONCE(oq->last_pkt_count, pkts_pend); } /* Flush the previous wrties before writing to RESEND bit */ - wmb(); + smp_wmb(); +} + +/** + * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue. + * + * @iq: Octeon Tx queue data structure. + * @oq: Octeon Rx queue data structure. + */ +static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq) +{ writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg); writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg); } @@ -601,7 +616,8 @@ static int octep_napi_poll(struct napi_struct *napi, int budget) if (tx_pending || rx_done >= budget) return budget; - napi_complete(napi); + octep_update_pkt(ioq_vector->iq, ioq_vector->oq); + napi_complete_done(napi, rx_done); octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq); return rx_done; } diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c index f2a7c6a76c74..74de19166488 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c @@ -324,10 +324,16 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct, struct octep_oq *oq) { u32 pkt_count, new_pkts; + u32 last_pkt_count, pkts_pending; pkt_count = readl(oq->pkts_sent_reg); - new_pkts = pkt_count - oq->last_pkt_count; + last_pkt_count = READ_ONCE(oq->last_pkt_count); + new_pkts = pkt_count - last_pkt_count; + if (pkt_count < last_pkt_count) { + dev_err(oq->dev, "OQ-%u pkt_count(%u) < oq->last_pkt_count(%u)\n", + oq->q_no, pkt_count, last_pkt_count); + } /* Clear the hardware packets counter register if the rx queue is * being processed continuously with-in a single interrupt and * reached half its max value. 
@@ -338,8 +344,9 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct, pkt_count = readl(oq->pkts_sent_reg); new_pkts += pkt_count; } - oq->last_pkt_count = pkt_count; - oq->pkts_pending += new_pkts; + WRITE_ONCE(oq->last_pkt_count, pkt_count); + pkts_pending = READ_ONCE(oq->pkts_pending); + WRITE_ONCE(oq->pkts_pending, (pkts_pending + new_pkts)); return new_pkts; } @@ -414,7 +421,7 @@ static int __octep_oq_process_rx(struct octep_device *oct, u16 rx_ol_flags; u32 read_idx; - read_idx = oq->host_read_idx; + read_idx = READ_ONCE(oq->host_read_idx); rx_bytes = 0; desc_used = 0; for (pkt = 0; pkt < pkts_to_process; pkt++) { @@ -499,7 +506,7 @@ static int __octep_oq_process_rx(struct octep_device *oct, napi_gro_receive(oq->napi, skb); } - oq->host_read_idx = read_idx; + WRITE_ONCE(oq->host_read_idx, read_idx); oq->refill_count += desc_used; oq->stats->packets += pkt; oq->stats->bytes += rx_bytes; @@ -522,22 +529,26 @@ int octep_oq_process_rx(struct octep_oq *oq, int budget) { u32 pkts_available, pkts_processed, total_pkts_processed; struct octep_device *oct = oq->octep_dev; + u32 pkts_pending; pkts_available = 0; pkts_processed = 0; total_pkts_processed = 0; while (total_pkts_processed < budget) { /* update pending count only when current one exhausted */ - if (oq->pkts_pending == 0) + pkts_pending = READ_ONCE(oq->pkts_pending); + if (pkts_pending == 0) octep_oq_check_hw_for_pkts(oct, oq); + pkts_pending = READ_ONCE(oq->pkts_pending); pkts_available = min(budget - total_pkts_processed, - oq->pkts_pending); + pkts_pending); if (!pkts_available) break; pkts_processed = __octep_oq_process_rx(oct, oq, pkts_available); - oq->pkts_pending -= pkts_processed; + pkts_pending = READ_ONCE(oq->pkts_pending); + WRITE_ONCE(oq->pkts_pending, (pkts_pending - pkts_processed)); total_pkts_processed += pkts_processed; } diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c index 562fe945b422..7f1b93cffb98 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c @@ -286,28 +286,45 @@ static void octep_vf_clean_irqs(struct octep_vf_device *oct) } /** - * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue. + * octep_vf_update_pkt() - Update IQ/OQ IN/OUT_CNT registers. * * @iq: Octeon Tx queue data structure. * @oq: Octeon Rx queue data structure. 
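+ *
+ * Mirrors octep_update_pkt() on the PF: the processed/pending deltas
+ * are written to the IN/OUT_CNT registers and read back so the writes
+ * are posted before octep_vf_enable_ioq_irq() sets the RESEND bits.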
*/ -static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq) + +static void octep_vf_update_pkt(struct octep_vf_iq *iq, struct octep_vf_oq *oq) { - u32 pkts_pend = oq->pkts_pending; + u32 pkts_pend = READ_ONCE(oq->pkts_pending); + u32 last_pkt_count = READ_ONCE(oq->last_pkt_count); + u32 pkts_processed = READ_ONCE(iq->pkts_processed); + u32 pkt_in_done = READ_ONCE(iq->pkt_in_done); netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no); - if (iq->pkts_processed) { - writel(iq->pkts_processed, iq->inst_cnt_reg); - iq->pkt_in_done -= iq->pkts_processed; - iq->pkts_processed = 0; + if (pkts_processed) { + writel(pkts_processed, iq->inst_cnt_reg); + readl(iq->inst_cnt_reg); + WRITE_ONCE(iq->pkt_in_done, (pkt_in_done - pkts_processed)); + WRITE_ONCE(iq->pkts_processed, 0); } - if (oq->last_pkt_count - pkts_pend) { - writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg); - oq->last_pkt_count = pkts_pend; + if (last_pkt_count - pkts_pend) { + writel(last_pkt_count - pkts_pend, oq->pkts_sent_reg); + readl(oq->pkts_sent_reg); + WRITE_ONCE(oq->last_pkt_count, pkts_pend); } /* Flush the previous wrties before writing to RESEND bit */ smp_wmb(); +} + +/** + * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue. + * + * @iq: Octeon Tx queue data structure. + * @oq: Octeon Rx queue data structure. + */ +static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, + struct octep_vf_oq *oq) +{ writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg); writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg); } @@ -333,6 +350,7 @@ static int octep_vf_napi_poll(struct napi_struct *napi, int budget) if (tx_pending || rx_done >= budget) return budget; + octep_vf_update_pkt(ioq_vector->iq, ioq_vector->oq); if (likely(napi_complete_done(napi, rx_done))) octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq); diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c index 6f865dbbba6c..b579d5b545c4 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c @@ -325,9 +325,16 @@ static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct, struct octep_vf_oq *oq) { u32 pkt_count, new_pkts; + u32 last_pkt_count, pkts_pending; pkt_count = readl(oq->pkts_sent_reg); - new_pkts = pkt_count - oq->last_pkt_count; + last_pkt_count = READ_ONCE(oq->last_pkt_count); + new_pkts = pkt_count - last_pkt_count; + + if (pkt_count < last_pkt_count) { + dev_err(oq->dev, "OQ-%u pkt_count(%u) < oq->last_pkt_count(%u)\n", + oq->q_no, pkt_count, last_pkt_count); + } /* Clear the hardware packets counter register if the rx queue is * being processed continuously with-in a single interrupt and @@ -339,8 +346,9 @@ static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct, pkt_count = readl(oq->pkts_sent_reg); new_pkts += pkt_count; } - oq->last_pkt_count = pkt_count; - oq->pkts_pending += new_pkts; + WRITE_ONCE(oq->last_pkt_count, pkt_count); + pkts_pending = READ_ONCE(oq->pkts_pending); + WRITE_ONCE(oq->pkts_pending, (pkts_pending + new_pkts)); return new_pkts; } @@ -369,7 +377,7 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct, struct sk_buff *skb; u32 read_idx; - read_idx = oq->host_read_idx; + read_idx = READ_ONCE(oq->host_read_idx); rx_bytes = 0; desc_used = 0; for (pkt = 0; pkt < pkts_to_process; pkt++) { @@ -463,7 +471,7 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct, 
napi_gro_receive(oq->napi, skb); } - oq->host_read_idx = read_idx; + WRITE_ONCE(oq->host_read_idx, read_idx); oq->refill_count += desc_used; oq->stats->packets += pkt; oq->stats->bytes += rx_bytes; @@ -486,22 +494,26 @@ int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget) { u32 pkts_available, pkts_processed, total_pkts_processed; struct octep_vf_device *oct = oq->octep_vf_dev; + u32 pkts_pending; pkts_available = 0; pkts_processed = 0; total_pkts_processed = 0; while (total_pkts_processed < budget) { /* update pending count only when current one exhausted */ - if (oq->pkts_pending == 0) + pkts_pending = READ_ONCE(oq->pkts_pending); + if (pkts_pending == 0) octep_vf_oq_check_hw_for_pkts(oct, oq); + pkts_pending = READ_ONCE(oq->pkts_pending); pkts_available = min(budget - total_pkts_processed, - oq->pkts_pending); + pkts_pending); if (!pkts_available) break; pkts_processed = __octep_vf_oq_process_rx(oct, oq, pkts_available); - oq->pkts_pending -= pkts_processed; + pkts_pending = READ_ONCE(oq->pkts_pending); + WRITE_ONCE(oq->pkts_pending, (pkts_pending - pkts_processed)); total_pkts_processed += pkts_processed; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index fb15c794efc9..a29f1ea04c7d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -327,10 +327,10 @@ static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx, rvu_report_pair_end(fmsg); break; case NIX_AF_RVU_RAS: - intr_val = nix_event_context->nix_af_rvu_err; + intr_val = nix_event_context->nix_af_rvu_ras; rvu_report_pair_start(fmsg, "NIX_AF_RAS"); devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ", - nix_event_context->nix_af_rvu_err); + nix_event_context->nix_af_rvu_ras); devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:"); if (intr_val & BIT_ULL(34)) devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S"); @@ -475,7 +475,7 @@ static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter, if (blkaddr < 0) return blkaddr; - if (nix_event_ctx->nix_af_rvu_int) + if (nix_event_ctx->nix_af_rvu_ras) rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL); return 0; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index e5e2ffa9c542..ddc321a02fda 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -3748,12 +3748,21 @@ static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog, mtk_stop(dev); old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held()); + + if (netif_running(dev) && need_update) { + int err; + + err = mtk_open(dev); + if (err) { + rcu_assign_pointer(eth->prog, old_prog); + + return err; + } + } + if (old_prog) bpf_prog_put(old_prog); - if (netif_running(dev) && need_update) - return mtk_open(dev); - return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 60ba840e00fa..afdeb1b3d425 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -47,7 +47,6 @@ static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq) "SQ 0x%x: cc (0x%x) != pc (0x%x)\n", sq->sqn, sq->cc, sq->pc); sq->cc = 0; - sq->dma_fifo_cc = 0; sq->pc = 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h 
b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index f8eaaf37963b..abcbd38db9db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -287,6 +287,7 @@ struct mlx5e_ipsec_sa_entry { struct mlx5e_ipsec_dwork *dwork; struct mlx5e_ipsec_limits limits; u32 rx_mapped_id; + u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)]; }; struct mlx5_accel_pol_xfrm_attrs { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 197a1c6930c0..329608c59313 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -2912,7 +2912,7 @@ void mlx5e_ipsec_disable_events(struct mlx5e_priv *priv) goto out; peer_priv = mlx5_devcom_get_next_peer_data(priv->devcom, &tmp); - if (peer_priv) + if (peer_priv && peer_priv->ipsec) complete_all(&peer_priv->ipsec->comp); mlx5_devcom_for_each_peer_end(priv->devcom); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c index 33344e00719b..05faad5083d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c @@ -310,10 +310,11 @@ static void mlx5e_ipsec_aso_update(struct mlx5e_ipsec_sa_entry *sa_entry, mlx5e_ipsec_aso_query(sa_entry, data); } -static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry, - u32 mode_param) +static void +mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry, + u32 mode_param, + struct mlx5_accel_esp_xfrm_attrs *attrs) { - struct mlx5_accel_esp_xfrm_attrs attrs = {}; struct mlx5_wqe_aso_ctrl_seg data = {}; if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) { @@ -323,18 +324,7 @@ static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry, sa_entry->esn_state.overlap = 1; } - mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs); - - /* It is safe to execute the modify below unlocked since the only flows - * that could affect this HW object, are create, destroy and this work. - * - * Creation flow can't co-exist with this modify work, the destruction - * flow would cancel this work, and this work is a single entity that - * can't conflict with it self. 
- */ - spin_unlock_bh(&sa_entry->x->lock); - mlx5_accel_esp_modify_xfrm(sa_entry, &attrs); - spin_lock_bh(&sa_entry->x->lock); + mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, attrs); data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET; @@ -370,20 +360,18 @@ static void mlx5e_ipsec_aso_update_soft(struct mlx5e_ipsec_sa_entry *sa_entry, static void mlx5e_ipsec_handle_limits(struct mlx5e_ipsec_sa_entry *sa_entry) { struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs; - struct mlx5e_ipsec *ipsec = sa_entry->ipsec; - struct mlx5e_ipsec_aso *aso = ipsec->aso; bool soft_arm, hard_arm; u64 hard_cnt; lockdep_assert_held(&sa_entry->x->lock); - soft_arm = !MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm); - hard_arm = !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm); + soft_arm = !MLX5_GET(ipsec_aso, sa_entry->ctx, soft_lft_arm); + hard_arm = !MLX5_GET(ipsec_aso, sa_entry->ctx, hard_lft_arm); if (!soft_arm && !hard_arm) /* It is not lifetime event */ return; - hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt); + hard_cnt = MLX5_GET(ipsec_aso, sa_entry->ctx, remove_flow_pkt_cnt); if (!hard_cnt || hard_arm) { /* It is possible to see packet counter equal to zero without * hard limit event armed. Such situation can be if packet @@ -453,11 +441,11 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work) struct mlx5e_ipsec_work *work = container_of(_work, struct mlx5e_ipsec_work, work); struct mlx5e_ipsec_sa_entry *sa_entry = work->data; + struct mlx5_accel_esp_xfrm_attrs tmp = {}; struct mlx5_accel_esp_xfrm_attrs *attrs; - struct mlx5e_ipsec_aso *aso; + bool need_modify = false; int ret; - aso = sa_entry->ipsec->aso; attrs = &sa_entry->attrs; spin_lock_bh(&sa_entry->x->lock); @@ -465,18 +453,22 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work) if (ret) goto unlock; + if (attrs->lft.soft_packet_limit != XFRM_INF) + mlx5e_ipsec_handle_limits(sa_entry); + if (attrs->replay_esn.trigger && - !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) { - u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter); + !MLX5_GET(ipsec_aso, sa_entry->ctx, esn_event_arm)) { + u32 mode_param = MLX5_GET(ipsec_aso, sa_entry->ctx, + mode_parameter); - mlx5e_ipsec_update_esn_state(sa_entry, mode_param); + mlx5e_ipsec_update_esn_state(sa_entry, mode_param, &tmp); + need_modify = true; } - if (attrs->lft.soft_packet_limit != XFRM_INF) - mlx5e_ipsec_handle_limits(sa_entry); - unlock: spin_unlock_bh(&sa_entry->x->lock); + if (need_modify) + mlx5_accel_esp_modify_xfrm(sa_entry, &tmp); kfree(work); } @@ -629,6 +621,8 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry, /* We are in atomic context */ udelay(10); } while (ret && time_is_after_jiffies(expires)); + if (!ret) + memcpy(sa_entry->ctx, aso->ctx, MLX5_ST_SZ_BYTES(ipsec_aso)); spin_unlock_bh(&aso->lock); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index efcfcddab376..268e20884757 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1589,6 +1589,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi struct skb_shared_info *sinfo; u32 frag_consumed_bytes; struct bpf_prog *prog; + u8 nr_frags_free = 0; struct sk_buff *skb; dma_addr_t addr; u32 truesize; @@ -1631,15 +1632,13 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi prog = rcu_dereference(rq->xdp_prog); if (prog) { - u8 nr_frags_free, 
old_nr_frags = sinfo->nr_frags; + u8 old_nr_frags = sinfo->nr_frags; if (mlx5e_xdp_handle(rq, prog, mxbuf)) { if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { struct mlx5e_wqe_frag_info *pwi; - wi -= old_nr_frags - sinfo->nr_frags; - for (pwi = head_wi; pwi < wi; pwi++) pwi->frag_page->frags++; } @@ -1647,10 +1646,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi } nr_frags_free = old_nr_frags - sinfo->nr_frags; - if (unlikely(nr_frags_free)) { - wi -= nr_frags_free; + if (unlikely(nr_frags_free)) truesize -= nr_frags_free * frag_info->frag_stride; - } } skb = mlx5e_build_linear_skb( @@ -1666,7 +1663,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi if (xdp_buff_has_frags(&mxbuf->xdp)) { /* sinfo->nr_frags is reset by build_skb, calculate again. */ - xdp_update_skb_frags_info(skb, wi - head_wi - 1, + xdp_update_skb_frags_info(skb, wi - head_wi - nr_frags_free - 1, sinfo->xdp_frags_size, truesize, xdp_buff_get_skb_flags(&mxbuf->xdp)); @@ -1957,14 +1954,13 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w if (prog) { u8 nr_frags_free, old_nr_frags = sinfo->nr_frags; + u8 new_nr_frags; u32 len; if (mlx5e_xdp_handle(rq, prog, mxbuf)) { if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { struct mlx5e_frag_page *pfp; - frag_page -= old_nr_frags - sinfo->nr_frags; - for (pfp = head_page; pfp < frag_page; pfp++) pfp->frags++; @@ -1975,13 +1971,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w return NULL; /* page/packet was consumed by XDP */ } - nr_frags_free = old_nr_frags - sinfo->nr_frags; - if (unlikely(nr_frags_free)) { - frag_page -= nr_frags_free; + new_nr_frags = sinfo->nr_frags; + nr_frags_free = old_nr_frags - new_nr_frags; + if (unlikely(nr_frags_free)) truesize -= (nr_frags_free - 1) * PAGE_SIZE + ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); - } len = mxbuf->xdp.data_end - mxbuf->xdp.data; @@ -2003,7 +1998,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w struct mlx5e_frag_page *pagep; /* sinfo->nr_frags is reset by build_skb, calculate again. */ - xdp_update_skb_frags_info(skb, frag_page - head_page, + xdp_update_skb_frags_info(skb, new_nr_frags, sinfo->xdp_frags_size, truesize, xdp_buff_get_skb_flags(&mxbuf->xdp)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index 26178d0bac92..faccc60fc93a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -1489,24 +1489,24 @@ out: return err; } -static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev) +static u32 mlx5_esw_qos_lag_link_speed_get(struct mlx5_core_dev *mdev, + bool take_rtnl) { struct ethtool_link_ksettings lksettings; struct net_device *slave, *master; u32 speed = SPEED_UNKNOWN; - /* Lock ensures a stable reference to master and slave netdevice - * while port speed of master is queried. 
- */ - ASSERT_RTNL(); - slave = mlx5_uplink_netdev_get(mdev); if (!slave) goto out; + if (take_rtnl) + rtnl_lock(); master = netdev_master_upper_dev_get(slave); if (master && !__ethtool_get_link_ksettings(master, &lksettings)) speed = lksettings.base.speed; + if (take_rtnl) + rtnl_unlock(); out: mlx5_uplink_netdev_put(mdev, slave); @@ -1514,20 +1514,15 @@ out: } static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max, - bool hold_rtnl_lock, struct netlink_ext_ack *extack) + bool take_rtnl, + struct netlink_ext_ack *extack) { int err; if (!mlx5_lag_is_active(mdev)) goto skip_lag; - if (hold_rtnl_lock) - rtnl_lock(); - - *link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev); - - if (hold_rtnl_lock) - rtnl_unlock(); + *link_speed_max = mlx5_esw_qos_lag_link_speed_get(mdev, take_rtnl); if (*link_speed_max != (u32)SPEED_UNKNOWN) return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index d3af87a94a18..123c96716a54 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1072,10 +1072,11 @@ static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw) static void mlx5_eswitch_event_handler_unregister(struct mlx5_eswitch *esw) { - if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) + if (esw->mode == MLX5_ESWITCH_OFFLOADS && + mlx5_eswitch_is_funcs_handler(esw->dev)) { mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb); - - flush_workqueue(esw->work_queue); + atomic_inc(&esw->esw_funcs.generation); + } } static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 6841caef02d1..c2563bee74df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -335,10 +335,12 @@ struct esw_mc_addr { /* SRIOV only */ struct mlx5_host_work { struct work_struct work; struct mlx5_eswitch *esw; + int work_gen; }; struct mlx5_esw_functions { struct mlx5_nb nb; + atomic_t generation; bool host_funcs_disabled; u16 num_vfs; u16 num_ec_vfs; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 1366f6e489bd..7a9ee36b8dca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1241,21 +1241,17 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, flows[peer_vport->index] = flow; } - if (mlx5_esw_host_functions_enabled(esw->dev)) { - mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, - mlx5_core_max_vfs(peer_dev)) { - esw_set_peer_miss_rule_source_port(esw, peer_esw, - spec, - peer_vport->vport); - - flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), - spec, &flow_act, &dest, 1); - if (IS_ERR(flow)) { - err = PTR_ERR(flow); - goto add_vf_flow_err; - } - flows[peer_vport->index] = flow; + mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_vfs(peer_dev)) { + esw_set_peer_miss_rule_source_port(esw, peer_esw, spec, + peer_vport->vport); + flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), + spec, &flow_act, &dest, 1); + if (IS_ERR(flow)) { + err = PTR_ERR(flow); + goto add_vf_flow_err; } + flows[peer_vport->index] = flow; } if (mlx5_core_ec_sriov_enabled(peer_dev)) { @@ -1347,7 +1343,8 @@ 
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw, mlx5_del_flow_rules(flows[peer_vport->index]); } - if (mlx5_core_is_ecpf_esw_manager(peer_dev)) { + if (mlx5_core_is_ecpf_esw_manager(peer_dev) && + mlx5_esw_host_functions_enabled(peer_dev)) { peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); mlx5_del_flow_rules(flows[peer_vport->index]); } @@ -3582,22 +3579,28 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw) } static void -esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out) +esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, int work_gen, + const u32 *out) { struct devlink *devlink; bool host_pf_disabled; u16 new_num_vfs; + devlink = priv_to_devlink(esw->dev); + devl_lock(devlink); + + /* Stale work from one or more mode changes ago. Bail out. */ + if (work_gen != atomic_read(&esw->esw_funcs.generation)) + goto unlock; + new_num_vfs = MLX5_GET(query_esw_functions_out, out, host_params_context.host_num_of_vfs); host_pf_disabled = MLX5_GET(query_esw_functions_out, out, host_params_context.host_pf_disabled); if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled) - return; + goto unlock; - devlink = priv_to_devlink(esw->dev); - devl_lock(devlink); /* Number of VFs can only change from "0 to x" or "x to 0". */ if (esw->esw_funcs.num_vfs > 0) { mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs); @@ -3612,6 +3615,7 @@ esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out) } } esw->esw_funcs.num_vfs = new_num_vfs; +unlock: devl_unlock(devlink); } @@ -3628,7 +3632,7 @@ static void esw_functions_changed_event_handler(struct work_struct *work) if (IS_ERR(out)) goto out; - esw_vfs_changed_event_handler(esw, out); + esw_vfs_changed_event_handler(esw, host_work->work_gen, out); kvfree(out); out: kfree(host_work); @@ -3648,6 +3652,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs); host_work->esw = esw; + host_work->work_gen = atomic_read(&esw_funcs->generation); INIT_WORK(&host_work->work, esw_functions_changed_event_handler); queue_work(esw->work_queue, &host_work->work); diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index a3845edf0e48..f0b5dd752f08 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -3053,6 +3053,11 @@ static void lan743x_phylink_mac_link_up(struct phylink_config *config, else if (speed == SPEED_100) mac_cr |= MAC_CR_CFG_L_; + if (duplex == DUPLEX_FULL) + mac_cr |= MAC_CR_DPX_; + else + mac_cr &= ~MAC_CR_DPX_; + lan743x_csr_write(adapter, MAC_CR, mac_cr); lan743x_ptp_update_latency(adapter, speed); diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 37d2f108a839..786186c9a115 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -1934,6 +1934,7 @@ remove_irq: mana_gd_remove_irqs(pdev); free_workqueue: destroy_workqueue(gc->service_wq); + gc->service_wq = NULL; dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err); return err; } diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index ba3467f1e2ea..48a9acea4ab6 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -814,9 +814,6 @@ void 
mana_hwc_destroy_channel(struct gdma_context *gc) gc->max_num_cqs = 0; } - kfree(hwc->caller_ctx); - hwc->caller_ctx = NULL; - if (hwc->txq) mana_hwc_destroy_wq(hwc, hwc->txq); @@ -826,6 +823,9 @@ void mana_hwc_destroy_channel(struct gdma_context *gc) if (hwc->cq) mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq); + kfree(hwc->caller_ctx); + hwc->caller_ctx = NULL; + mana_gd_free_res_map(&hwc->inflight_msg_res); hwc->num_inflight_msg = 0; diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 933e9d681ded..dca62fb9a3a9 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -1770,8 +1770,14 @@ static void mana_poll_tx_cq(struct mana_cq *cq) ndev = txq->ndev; apc = netdev_priv(ndev); + /* Limit CQEs polled to 4 wraparounds of the CQ to ensure the + * doorbell can be rung in time for the hardware's requirement + * of at least one doorbell ring every 8 wraparounds. + */ comp_read = mana_gd_poll_cq(cq->gdma_cq, completions, - CQE_POLLING_BUFFER); + min((cq->gdma_cq->queue_size / + COMP_ENTRY_SIZE) * 4, + CQE_POLLING_BUFFER)); if (comp_read < 1) return; @@ -2156,7 +2162,14 @@ static void mana_poll_rx_cq(struct mana_cq *cq) struct mana_rxq *rxq = cq->rxq; int comp_read, i; - comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER); + /* Limit CQEs polled to 4 wraparounds of the CQ to ensure the + * doorbell can be rung in time for the hardware's requirement + * of at least one doorbell ring every 8 wraparounds. + */ + comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, + min((cq->gdma_cq->queue_size / + COMP_ENTRY_SIZE) * 4, + CQE_POLLING_BUFFER)); WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER); rxq->xdp_flush = false; @@ -2201,11 +2214,11 @@ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue) mana_gd_ring_cq(gdma_queue, SET_ARM_BIT); cq->work_done_since_doorbell = 0; napi_complete_done(&cq->napi, w); - } else if (cq->work_done_since_doorbell > - cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) { + } else if (cq->work_done_since_doorbell >= + (cq->gdma_cq->queue_size / COMP_ENTRY_SIZE) * 4) { /* MANA hardware requires at least one doorbell ring every 8 * wraparounds of CQ even if there is no need to arm the CQ. - * This driver rings the doorbell as soon as we have exceeded + * This driver rings the doorbell as soon as it has processed * 4 wraparounds. 
*/ mana_gd_ring_cq(gdma_queue, 0); @@ -3412,6 +3425,7 @@ static int add_adev(struct gdma_dev *gd, const char *name) struct auxiliary_device *adev; struct mana_adev *madev; int ret; + int id; madev = kzalloc_obj(*madev); if (!madev) @@ -3421,7 +3435,8 @@ static int add_adev(struct gdma_dev *gd, const char *name) ret = mana_adev_idx_alloc(); if (ret < 0) goto idx_fail; - adev->id = ret; + id = ret; + adev->id = id; adev->name = name; adev->dev.parent = gd->gdma_context->dev; @@ -3447,7 +3462,7 @@ add_fail: auxiliary_device_uninit(adev); init_fail: - mana_adev_idx_free(adev->id); + mana_adev_idx_free(id); idx_fail: kfree(madev); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 8d040e611d5a..637e635bbf03 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -1719,13 +1719,18 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa) if (ether_addr_equal(netdev->dev_addr, mac)) return 0; - err = ionic_program_mac(lif, mac); - if (err < 0) - return err; + /* Only program macs for virtual functions to avoid losing the permanent + * Mac across warm reset/reboot. + */ + if (lif->ionic->pdev->is_virtfn) { + err = ionic_program_mac(lif, mac); + if (err < 0) + return err; - if (err > 0) - netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal-due to old FW running\n", - __func__); + if (err > 0) + netdev_dbg(netdev, "%s: SET and GET ATTR Mac are not equal-due to old FW running\n", + __func__); + } err = eth_prepare_mac_addr_change(netdev, addr); if (err) diff --git a/drivers/net/ethernet/spacemit/k1_emac.c b/drivers/net/ethernet/spacemit/k1_emac.c index 338a2637b1da..15d43e4a748b 100644 --- a/drivers/net/ethernet/spacemit/k1_emac.c +++ b/drivers/net/ethernet/spacemit/k1_emac.c @@ -565,7 +565,9 @@ static void emac_alloc_rx_desc_buffers(struct emac_priv *priv) DMA_FROM_DEVICE); if (dma_mapping_error(&priv->pdev->dev, rx_buf->dma_addr)) { dev_err_ratelimited(&ndev->dev, "Mapping skb failed\n"); - goto err_free_skb; + dev_kfree_skb_any(skb); + rx_buf->skb = NULL; + break; } rx_desc_addr = &((struct emac_desc *)rx_ring->desc_addr)[i]; @@ -590,10 +592,6 @@ static void emac_alloc_rx_desc_buffers(struct emac_priv *priv) rx_ring->head = i; return; - -err_free_skb: - dev_kfree_skb_any(skb); - rx_buf->skb = NULL; } /* Returns number of packets received */ @@ -735,7 +733,7 @@ static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb) struct emac_desc tx_desc, *tx_desc_addr; struct device *dev = &priv->pdev->dev; struct emac_tx_desc_buffer *tx_buf; - u32 head, old_head, frag_num, f; + u32 head, old_head, frag_num, f, i; bool buf_idx; frag_num = skb_shinfo(skb)->nr_frags; @@ -803,6 +801,15 @@ static void emac_tx_mem_map(struct emac_priv *priv, struct sk_buff *skb) err_free_skb: dev_dstats_tx_dropped(priv->ndev); + + i = old_head; + while (i != head) { + emac_free_tx_buf(priv, i); + + if (++i == tx_ring->total_cnt) + i = 0; + } + dev_kfree_skb_any(skb); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 51c96a738151..33667a26708c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -323,6 +323,7 @@ struct stmmac_priv { void __iomem *ptpaddr; void __iomem *estaddr; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + unsigned int num_double_vlans; int sfty_irq; int sfty_ce_irq; int sfty_ue_irq; diff --git 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e00aa42a1961..6827c99bde8c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -156,6 +156,7 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue); static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue); static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, u32 rxmode, u32 chan); +static int stmmac_vlan_restore(struct stmmac_priv *priv); #ifdef CONFIG_DEBUG_FS static const struct net_device_ops stmmac_netdev_ops; @@ -4107,6 +4108,8 @@ static int __stmmac_open(struct net_device *dev, phylink_start(priv->phylink); + stmmac_vlan_restore(priv); + ret = stmmac_request_irq(dev); if (ret) goto irq_error; @@ -6766,6 +6769,9 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) hash = 0; } + if (!netif_running(priv->dev)) + return 0; + return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); } @@ -6775,6 +6781,7 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double) static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int num_double_vlans; bool is_double = false; int ret; @@ -6786,7 +6793,8 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid is_double = true; set_bit(vid, priv->active_vlans); - ret = stmmac_vlan_update(priv, is_double); + num_double_vlans = priv->num_double_vlans + is_double; + ret = stmmac_vlan_update(priv, num_double_vlans); if (ret) { clear_bit(vid, priv->active_vlans); goto err_pm_put; @@ -6794,9 +6802,15 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid if (priv->hw->num_vlan) { ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); - if (ret) + if (ret) { + clear_bit(vid, priv->active_vlans); + stmmac_vlan_update(priv, priv->num_double_vlans); goto err_pm_put; + } } + + priv->num_double_vlans = num_double_vlans; + err_pm_put: pm_runtime_put(priv->device); @@ -6809,6 +6823,7 @@ err_pm_put: static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int num_double_vlans; bool is_double = false; int ret; @@ -6820,14 +6835,23 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi is_double = true; clear_bit(vid, priv->active_vlans); + num_double_vlans = priv->num_double_vlans - is_double; + ret = stmmac_vlan_update(priv, num_double_vlans); + if (ret) { + set_bit(vid, priv->active_vlans); + goto del_vlan_error; + } if (priv->hw->num_vlan) { ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); - if (ret) + if (ret) { + set_bit(vid, priv->active_vlans); + stmmac_vlan_update(priv, priv->num_double_vlans); goto del_vlan_error; + } } - ret = stmmac_vlan_update(priv, is_double); + priv->num_double_vlans = num_double_vlans; del_vlan_error: pm_runtime_put(priv->device); @@ -6835,6 +6859,23 @@ del_vlan_error: return ret; } +static int stmmac_vlan_restore(struct stmmac_priv *priv) +{ + int ret; + + if (!(priv->dev->features & NETIF_F_VLAN_FEATURES)) + return 0; + + if (priv->hw->num_vlan) + stmmac_restore_hw_vlan_rx_fltr(priv, priv->dev, priv->hw); + + ret = stmmac_vlan_update(priv, priv->num_double_vlans); + if (ret) + netdev_err(priv->dev, "Failed to restore VLANs\n"); + + return ret; +} + static int 
stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf) { struct stmmac_priv *priv = netdev_priv(dev); @@ -8259,10 +8300,10 @@ int stmmac_resume(struct device *dev) stmmac_init_coalesce(priv); phylink_rx_clk_stop_block(priv->phylink); stmmac_set_rx_mode(ndev); - - stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); phylink_rx_clk_stop_unblock(priv->phylink); + stmmac_vlan_restore(priv); + stmmac_enable_all_queues(priv); stmmac_enable_all_dma_irq(priv); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c index b18404dd5a8b..e24efe3bfedb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c @@ -76,7 +76,9 @@ static int vlan_add_hw_rx_fltr(struct net_device *dev, } hw->vlan_filter[0] = vid; - vlan_write_single(dev, vid); + + if (netif_running(dev)) + vlan_write_single(dev, vid); return 0; } @@ -97,12 +99,15 @@ static int vlan_add_hw_rx_fltr(struct net_device *dev, return -EPERM; } - ret = vlan_write_filter(dev, hw, index, val); + if (netif_running(dev)) { + ret = vlan_write_filter(dev, hw, index, val); + if (ret) + return ret; + } - if (!ret) - hw->vlan_filter[index] = val; + hw->vlan_filter[index] = val; - return ret; + return 0; } static int vlan_del_hw_rx_fltr(struct net_device *dev, @@ -115,7 +120,9 @@ static int vlan_del_hw_rx_fltr(struct net_device *dev, if (hw->num_vlan == 1) { if ((hw->vlan_filter[0] & VLAN_TAG_VID) == vid) { hw->vlan_filter[0] = 0; - vlan_write_single(dev, 0); + + if (netif_running(dev)) + vlan_write_single(dev, 0); } return 0; } @@ -124,25 +131,23 @@ static int vlan_del_hw_rx_fltr(struct net_device *dev, for (i = 0; i < hw->num_vlan; i++) { if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) && ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid)) { - ret = vlan_write_filter(dev, hw, i, 0); - if (!ret) - hw->vlan_filter[i] = 0; - else - return ret; + if (netif_running(dev)) { + ret = vlan_write_filter(dev, hw, i, 0); + if (ret) + return ret; + } + + hw->vlan_filter[i] = 0; } } - return ret; + return 0; } static void vlan_restore_hw_rx_fltr(struct net_device *dev, struct mac_device_info *hw) { - void __iomem *ioaddr = hw->pcsr; - u32 value; - u32 hash; - u32 val; int i; /* Single Rx VLAN Filter */ @@ -152,19 +157,8 @@ static void vlan_restore_hw_rx_fltr(struct net_device *dev, } /* Extended Rx VLAN Filter Enable */ - for (i = 0; i < hw->num_vlan; i++) { - if (hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) { - val = hw->vlan_filter[i]; - vlan_write_filter(dev, hw, i, val); - } - } - - hash = readl(ioaddr + VLAN_HASH_TABLE); - if (hash & VLAN_VLHT) { - value = readl(ioaddr + VLAN_TAG); - value |= VLAN_VTHM; - writel(value, ioaddr + VLAN_TAG); - } + for (i = 0; i < hw->num_vlan; i++) + vlan_write_filter(dev, hw, i, hw->vlan_filter[i]); } static void vlan_update_hash(struct mac_device_info *hw, u32 hash, @@ -183,6 +177,10 @@ static void vlan_update_hash(struct mac_device_info *hw, u32 hash, value |= VLAN_EDVLP; value |= VLAN_ESVL; value |= VLAN_DOVLTC; + } else { + value &= ~VLAN_EDVLP; + value &= ~VLAN_ESVL; + value &= ~VLAN_DOVLTC; } writel(value, ioaddr + VLAN_TAG); @@ -193,6 +191,10 @@ static void vlan_update_hash(struct mac_device_info *hw, u32 hash, value |= VLAN_EDVLP; value |= VLAN_ESVL; value |= VLAN_DOVLTC; + } else { + value &= ~VLAN_EDVLP; + value &= ~VLAN_ESVL; + value &= ~VLAN_DOVLTC; } writel(value | perfect_match, ioaddr + VLAN_TAG); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 
5924db6be3fe..265ce5479915 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -391,7 +391,7 @@ static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev) cpsw_ale_set_allmulti(common->ale, ndev->flags & IFF_ALLMULTI, port->port_id); - port_mask = ALE_PORT_HOST; + port_mask = BIT(port->port_id) | ALE_PORT_HOST; /* Clear all mcast from ALE */ cpsw_ale_flush_multicast(common->ale, port_mask, -1); @@ -1351,7 +1351,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, ndev_priv = netdev_priv(ndev); am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark); skb_put(skb, pkt_len); - if (port->rx_ts_enabled) + if (port->rx_ts_filter) am65_cpts_rx_timestamp(common->cpts, skb); skb_mark_for_recycle(skb); skb->protocol = eth_type_trans(skb, ndev); @@ -1811,11 +1811,14 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, switch (cfg->rx_filter) { case HWTSTAMP_FILTER_NONE: - port->rx_ts_enabled = false; + port->rx_ts_filter = HWTSTAMP_FILTER_NONE; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + port->rx_ts_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: @@ -1825,8 +1828,8 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - port->rx_ts_enabled = true; - cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT | HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + port->rx_ts_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: @@ -1863,7 +1866,7 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev, ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN | AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN; - if (port->rx_ts_enabled) + if (port->rx_ts_filter) ts_ctrl |= AM65_CPSW_TS_RX_ANX_ALL_EN | AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN; @@ -1888,8 +1891,7 @@ static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev, cfg->flags = 0; cfg->tx_type = port->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - cfg->rx_filter = port->rx_ts_enabled ? 
HWTSTAMP_FILTER_PTP_V2_EVENT | - HWTSTAMP_FILTER_PTP_V1_L4_EVENT : HWTSTAMP_FILTER_NONE; + cfg->rx_filter = port->rx_ts_filter; return 0; } diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index 917c37e4e89b..7750448e4746 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -52,7 +52,7 @@ struct am65_cpsw_port { bool disabled; struct am65_cpsw_slave_data slave; bool tx_ts_enabled; - bool rx_ts_enabled; + enum hwtstamp_rx_filters rx_ts_filter; struct am65_cpsw_qos qos; struct devlink_port devlink_port; struct bpf_prog *xdp_prog; diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index bb969dd435b4..be7b69319221 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -450,14 +450,13 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry, ale->port_mask_bits); if ((mask & port_mask) == 0) return; /* ports dont intersect, not interested */ - mask &= ~port_mask; + mask &= (~port_mask | ALE_PORT_HOST); - /* free if only remaining port is host port */ - if (mask) + if (mask == 0x0 || mask == ALE_PORT_HOST) + cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); + else cpsw_ale_set_port_mask(ale_entry, mask, ale->port_mask_bits); - else - cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); } int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid) diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index 0cf9dfe0fa36..fd4e7622f123 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -962,7 +962,6 @@ static int emac_rx_packet_zc(struct prueth_emac *emac, u32 flow_id, pkt_len -= 4; cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL); psdata = cppi5_hdesc_get_psdata(desc_rx); - k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); count++; xsk_buff_set_size(xdp, pkt_len); xsk_buff_dma_sync_for_cpu(xdp); @@ -988,6 +987,7 @@ static int emac_rx_packet_zc(struct prueth_emac *emac, u32 flow_id, emac_dispatch_skb_zc(emac, xdp, psdata); xsk_buff_free(xdp); } + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); } if (xdp_status & ICSSG_XDP_REDIR) @@ -1057,7 +1057,6 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) /* firmware adds 4 CRC bytes, strip them */ pkt_len -= 4; cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL); - k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); /* if allocation fails we drop the packet but push the * descriptor back to the ring with old page to prevent a stall @@ -1075,6 +1074,11 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) xdp_prepare_buff(&xdp, pa, PRUETH_HEADROOM, pkt_len, false); *xdp_state = emac_run_xdp(emac, &xdp, &pkt_len); + if (*xdp_state == ICSSG_XDP_CONSUMED) { + page_pool_recycle_direct(pool, page); + goto requeue; + } + if (*xdp_state != ICSSG_XDP_PASS) goto requeue; headroom = xdp.data - xdp.data_hard_start; @@ -1110,6 +1114,7 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id, u32 *xdp_state) ndev->stats.rx_packets++; requeue: + k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx); /* queue another RX DMA */ ret = prueth_dma_rx_push_mapped(emac, &emac->rx_chns, new_page, PRUETH_MAX_PKT_SIZE); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 0939994c932f..42a881bee109 100644 --- 
a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -273,6 +273,14 @@ static int prueth_emac_common_start(struct prueth *prueth) if (ret) goto disable_class; + /* Reset link state to force reconfiguration in + * emac_adjust_link(). Without this, if the link was already up + * before restart, emac_adjust_link() won't detect any state + * change and will skip critical configuration like writing + * speed to firmware. + */ + emac->link = 0; + mutex_lock(&emac->ndev->phydev->lock); emac_adjust_link(emac->ndev); mutex_unlock(&emac->ndev->phydev->lock);
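The mlx5 eswitch hunks above replace flush_workqueue() with a generation counter: each queued work snapshots esw_funcs.generation at queue time, and the handler discards the work if the counter has since moved on. A minimal sketch of that snapshot-and-compare pattern; the names here (my_dev, my_work, my_queue_event) are illustrative, not the driver's own:

/*
 * Sketch: generation-counter invalidation for queued work.
 * Illustrative names only; not taken from the mlx5 driver.
 */
#include <linux/atomic.h>
#include <linux/container_of.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_dev {
	atomic_t generation;
	struct workqueue_struct *wq;
};

struct my_work {
	struct work_struct work;
	struct my_dev *dev;
	int gen;	/* snapshot taken when the event was queued */
};

static void my_work_fn(struct work_struct *work)
{
	struct my_work *mw = container_of(work, struct my_work, work);

	/*
	 * Stale: the device went through a mode change after this work
	 * was queued. (The real driver performs this comparison under
	 * the devlink lock so it cannot race with the bumping path.)
	 */
	if (mw->gen != atomic_read(&mw->dev->generation))
		goto out;

	/* ... handle a still-current event here ... */
out:
	kfree(mw);
}

static int my_queue_event(struct my_dev *dev)
{
	struct my_work *mw = kzalloc(sizeof(*mw), GFP_ATOMIC);

	if (!mw)
		return -ENOMEM;

	mw->dev = dev;
	mw->gen = atomic_read(&dev->generation);
	INIT_WORK(&mw->work, my_work_fn);
	queue_work(dev->wq, &mw->work);
	return 0;
}

static void my_invalidate_pending(struct my_dev *dev)
{
	/* Cheaper than flush_workqueue(): pending work self-discards. */
	atomic_inc(&dev->generation);
}

This avoids flushing a workqueue from a teardown path while still guaranteeing that work queued before the mode change never acts on the reconfigured device.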

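The mana hunks above cap each poll pass at four CQ wraparounds so that the hardware's requirement of a doorbell ring at least once every eight wraparounds can always be met. A sketch of that budget calculation; the two constants are stand-in assumptions, not values read from the driver headers:

/*
 * Sketch: CQ polling budget. COMP_ENTRY_SIZE and CQE_POLLING_BUFFER
 * values below are assumed for illustration.
 */
#include <linux/minmax.h>

#define COMP_ENTRY_SIZE		64	/* bytes per completion entry (assumed) */
#define CQE_POLLING_BUFFER	512	/* on-stack CQE array size (assumed) */

static unsigned int cq_poll_budget(unsigned int cq_size_bytes)
{
	unsigned int cqes_per_wrap = cq_size_bytes / COMP_ENTRY_SIZE;

	/*
	 * Poll at most four wraparounds' worth of CQEs per pass, and
	 * never more than the completion buffer holds, so the doorbell
	 * is rung well inside the eight-wraparound deadline.
	 */
	return min(cqes_per_wrap * 4, (unsigned int)CQE_POLLING_BUFFER);
}

With a 16 KiB CQ, for instance, cqes_per_wrap is 256, so the budget is min(1024, 512) = 512 completions per pass under these assumed constants.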