e1000 driver update

Signed-off-by: Jeff Kirsher <[EMAIL PROTECTED]>
Signed-off-by: John Ronciak <[EMAIL PROTECTED]>
Signed-off-by: Jesse Brandeburg <[EMAIL PROTECTED]>

4. Multiple Fixes
 - Fix RX fifo errors and statistics counting
 - Fix for SoL/IDER sessions
 - Fix flow control watermarks
 - Fix DPRINTK statement
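The flow control watermark fix drops the old MTU-based calculation and instead sets the
high watermark to 90% of the on-chip packet buffer, with the low watermark 8 bytes below
it (see the e1000_reset() hunk below). As a standalone sketch of just that arithmetic --
not driver code, and assuming pba is the packet buffer allocation in 1 KB units as
programmed into the PBA register; the 48 KB figure is only an example:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t pba = 48;	/* example only: a 48 KB packet buffer */
		uint16_t fc_high_water_mark;

		/* 90% of the FIFO: pba * 1024 * 9 / 10 == (pba * 9216) / 10 */
		fc_high_water_mark = (pba * 9216) / 10;
		/* clear the low bits, as the patch does (8-byte granularity) */
		fc_high_water_mark &= 0xfff8;

		printf("fc_high_water = %u bytes, fc_low_water = %u bytes\n",
		       (unsigned)fc_high_water_mark,
		       (unsigned)(fc_high_water_mark - 8));
		return 0;
	}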
diff -up linux-2.6/drivers/net/e1000/e1000.h linux-2.6.new/drivers/net/e1000/e1000.h
--- linux-2.6/drivers/net/e1000/e1000.h	2005-11-14 16:20:34.000000000 -0800
+++ linux-2.6.new/drivers/net/e1000/e1000.h	2005-11-04 01:23:40.000000000 -0800
@@ -270,6 +270,7 @@ struct e1000_adapter {
 #ifdef CONFIG_E1000_MQ
 	struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
 #endif
+	unsigned long tx_queue_len;
 	uint32_t txd_cmd;
 	uint32_t tx_int_delay;
 	uint32_t tx_abs_int_delay;
@@ -277,9 +278,11 @@ struct e1000_adapter {
 	uint64_t gotcl_old;
 	uint64_t tpt_old;
 	uint64_t colc_old;
+	uint32_t tx_timeout_count;
 	uint32_t tx_fifo_head;
 	uint32_t tx_head_addr;
 	uint32_t tx_fifo_size;
+	uint8_t tx_timeout_factor;
 	atomic_t tx_fifo_stall;
 	boolean_t pcix_82544;
 	boolean_t detect_tx_hung;
@@ -310,6 +305,7 @@ struct e1000_adapter {
 	uint64_t hw_csum_err;
 	uint64_t hw_csum_good;
 	uint64_t rx_hdr_split;
+	uint32_t alloc_rx_buff_failed;
 	uint32_t rx_int_delay;
 	uint32_t rx_abs_int_delay;
 	boolean_t rx_csum;
diff -up linux-2.6/drivers/net/e1000/e1000_ethtool.c linux-2.6.new/drivers/net/e1000/e1000_ethtool.c
--- linux-2.6/drivers/net/e1000/e1000_ethtool.c	2005-11-14 16:20:34.000000000 -0800
+++ linux-2.6.new/drivers/net/e1000/e1000_ethtool.c	2005-11-04 01:23:40.000000000 -0800
@@ -80,6 +80,7 @@ static const struct e1000_stats e1000_gs
 	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
 	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
 	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
 	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
 	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
 	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
@@ -93,6 +98,7 @@ static const struct e1000_stats e1000_gs
 	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
 	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
 	{ "rx_header_split", E1000_STAT(rx_hdr_split) },
+	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
 };
 #define E1000_STATS_LEN	\
 	sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
@@ -183,6 +198,14 @@ e1000_set_settings(struct net_device *ne
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
+	/* When SoL/IDER sessions are active, autoneg/speed/duplex
+	 * cannot be changed */
+	if (e1000_check_phy_reset_block(&adapter->hw)) {
+		DPRINTK(DRV, ERR, "Cannot change link characteristics "
+		        "when SoL/IDER is active.\n");
+		return -EINVAL;
+	}
+
 	if(ecmd->autoneg == AUTONEG_ENABLE) {
 		hw->autoneg = 1;
 		if(hw->media_type == e1000_media_type_fiber)
@@ -1480,11 +1490,22 @@ e1000_run_loopback_test(struct e1000_ada
 static int
 e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
 {
-	if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback;
-	if((*data = e1000_setup_loopback_test(adapter))) goto err_loopback;
-	*data = e1000_run_loopback_test(adapter);
-	e1000_loopback_cleanup(adapter);
-	e1000_free_desc_rings(adapter);
+	/* PHY loopback cannot be performed if SoL/IDER
+	 * sessions are active */
+	if (!e1000_check_phy_reset_block(&adapter->hw)) {
+		if((*data = e1000_setup_desc_rings(adapter)))
+			goto err_loopback;
+		if((*data = e1000_setup_loopback_test(adapter)))
+			goto err_loopback;
+		*data = e1000_run_loopback_test(adapter);
+		e1000_loopback_cleanup(adapter);
+		e1000_free_desc_rings(adapter);
+	} else {
+		DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
+		        "when SoL/IDER is active.\n");
+		*data = 0;
+	}
+
 err_loopback:
 	return *data;
 }
diff -up linux-2.6/drivers/net/e1000/e1000_hw.c linux-2.6.new/drivers/net/e1000/e1000_hw.c
--- linux-2.6/drivers/net/e1000/e1000_hw.c	2005-11-14 16:20:34.000000000 -0800
+++ linux-2.6.new/drivers/net/e1000/e1000_hw.c	2005-11-04 01:23:40.000000000 -0800
@@ -838,6 +838,11 @@ e1000_setup_link(struct e1000_hw *hw)
 
 	DEBUGFUNC("e1000_setup_link");
 
+	/* In the case of the phy reset being blocked, we already have a link.
+	 * We do not have to set it up again. */
+	if (e1000_check_phy_reset_block(hw))
+		return E1000_SUCCESS;
+
 	/* Read and store word 0x0F of the EEPROM. This word contains bits
 	 * that determine the hardware's default PAUSE (flow control) mode,
 	 * a bit that determines whether the HW defaults to enabling or
diff -up linux-2.6/drivers/net/e1000/e1000_main.c linux-2.6.new/drivers/net/e1000/e1000_main.c
--- linux-2.6/drivers/net/e1000/e1000_main.c	2005-11-14 16:20:34.000000000 -0800
+++ linux-2.6.new/drivers/net/e1000/e1000_main.c	2005-11-04 01:23:40.000000000 -0800
@@ -368,6 +368,8 @@ e1000_up(struct e1000_adapter *adapter)
 		return err;
 	}
 
+	adapter->tx_queue_len = netdev->tx_queue_len;
+
 	mod_timer(&adapter->watchdog_timer, jiffies);
 
 #ifdef CONFIG_E1000_NAPI
@@ -382,6 +384,8 @@ void
 e1000_down(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
+				     e1000_check_mng_mode(&adapter->hw);
 
 	e1000_irq_disable(adapter);
 #ifdef CONFIG_E1000_MQ
@@ -400,6 +404,7 @@ e1000_down(struct e1000_adapter
 #ifdef CONFIG_E1000_NAPI
 	netif_poll_disable(netdev);
 #endif
+	netdev->tx_queue_len = adapter->tx_queue_len;
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
 	netif_carrier_off(netdev);
@@ -409,12 +414,16 @@ e1000_down(struct e1000_adapter
 	e1000_clean_all_tx_rings(adapter);
 	e1000_clean_all_rx_rings(adapter);
 
-	/* If WoL is not enabled and management mode is not IAMT
-	 * Power down the PHY so no link is implied when interface is down */
+	/* Power down the PHY so no link is implied when interface is down *
+	 * The PHY cannot be powered down if any of the following is TRUE *
+	 * (a) WoL is enabled
+	 * (b) AMT is active
+	 * (c) SoL/IDER session is active */
 	if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
 	   adapter->hw.media_type == e1000_media_type_copper &&
-	   !e1000_check_mng_mode(&adapter->hw) &&
-	   !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) {
+	   !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
+	   !mng_mode_enabled &&
+	   !e1000_check_phy_reset_block(&adapter->hw)) {
 		uint16_t mii_reg;
 		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
 		mii_reg |= MII_CR_POWER_DOWN;
@@ -426,10 +435,8 @@ e1000_down(struct e1000_adapter
 void
 e1000_reset(struct e1000_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
 	uint32_t pba, manc;
 	uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
-	uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
 
 	/* Repartition Pba for greater than 9k mtu
 	 * To take effect CTRL.RST is required.
@@ -453,15 +460,8 @@ e1000_reset(struct e1000_adapte
 	}
 
 	if((adapter->hw.mac_type != e1000_82573) &&
-	   (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
+	   (adapter->netdev->mtu > E1000_RXBUFFER_8192))
 		pba -= 8; /* allocate more FIFO for Tx */
-		/* send an XOFF when there is enough space in the
-		 * Rx FIFO to hold one extra full size Rx packet
-		 */
-		fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
-			ETHERNET_FCS_SIZE + 1;
-		fc_low_water_mark = fc_high_water_mark + 8;
-	}
 
 
 	if(adapter->hw.mac_type == e1000_82547) {
@@ -475,10 +475,12 @@ e1000_reset(struct e1000_adapte
 	E1000_WRITE_REG(&adapter->hw, PBA, pba);
 
 	/* flow control settings */
-	adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
-				    fc_high_water_mark;
-	adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
-				    fc_low_water_mark;
+	/* set the FC high water mark to 90% of the FIFO size */
+	fc_high_water_mark = ((pba * 9216)/10);
+	fc_high_water_mark &= 0xfff8; /* required to clear the lower bits */
+
+	adapter->hw.fc_high_water = fc_high_water_mark;
+	adapter->hw.fc_low_water = fc_high_water_mark - 8;
 	adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
 	adapter->hw.fc_send_xon = 1;
 	adapter->hw.fc = adapter->hw.original_fc;
@@ -765,6 +767,11 @@ e1000_probe(struct pci_dev *pdev,
 				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
 		break;
 	case e1000_82573:
+		/* Do not set DRV_LOAD if f/w is in AMT mode.
+		   It will eventually be set when i/f is opened. */
+		if (e1000_check_mng_mode(&adapter->hw))
+			break;
+
 		swsm = E1000_READ_REG(&adapter->hw, SWSM);
 		E1000_WRITE_REG(&adapter->hw, SWSM,
 				swsm | E1000_SWSM_DRV_LOAD);
@@ -833,6 +840,11 @@ e1000_remove(struct pci_dev *pdev)
 				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
 		break;
 	case e1000_82573:
+		/* If f/w is in AMT mode, DRV_LOAD bit is cleared in
+		   e1000_close. So no need to reset it here. */
+		if (e1000_check_mng_mode(&adapter->hw))
+			break;
+
 		swsm = E1000_READ_REG(&adapter->hw, SWSM);
 		E1000_WRITE_REG(&adapter->hw, SWSM,
 				swsm & ~E1000_SWSM_DRV_LOAD);
@@ -1097,6 +1145,15 @@ e1000_open(struct net_device *netdev)
 		e1000_update_mng_vlan(adapter);
 	}
 
+	/* if AMT is enabled, let the firmware know that the n/w interface
+	 * is now open*/
+	if (adapter->hw.mac_type == e1000_82573 &&
+	    e1000_check_mng_mode(&adapter->hw)) {
+		uint32_t swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm | E1000_SWSM_DRV_LOAD);
+	}
+
 	return E1000_SUCCESS;
 
 err_up:
@@ -1135,6 +1156,15 @@ e1000_close(struct net_device *netdev)
 			E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
 		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
 	}
+
+	/* if AMT is enabled, let the firmware know that the n/w interface
+	 * is now closed */
+	if (adapter->hw.mac_type == e1000_82573 &&
+	    e1000_check_mng_mode(&adapter->hw)) {
+		uint32_t swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm & ~E1000_SWSM_DRV_LOAD);
+	}
 
 	return 0;
 }
@@ -1538,6 +1592,9 @@ e1000_setup_rctl(struct e1000_adapter *a
 		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
 		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
 
+	if (adapter->hw.mac_type > e1000_82543)
+		rctl |= E1000_RCTL_SECRC;
+
 	if(adapter->hw.tbi_compatibility_on == 1)
 		rctl |= E1000_RCTL_SBP;
 	else
@@ -1802,12 +1866,10 @@ static inline void
 				buffer_info->dma,
 				buffer_info->length,
 				PCI_DMA_TODEVICE);
-		buffer_info->dma = 0;
 	}
-	if(buffer_info->skb) {
+	if (buffer_info->skb)
 		dev_kfree_skb_any(buffer_info->skb);
-		buffer_info->skb = NULL;
-	}
+	memset(buffer_info, 0, sizeof(struct e1000_buffer));
 }
 
 /**
@@ -2255,6 +2309,21 @@ e1000_watchdog_task(struct e1000_adapter
 			       adapter->link_duplex == FULL_DUPLEX ?
"Full Duplex" : "Half Duplex"); + /* tweak tx_queue_len according to speed/duplex */ + netdev->tx_queue_len = adapter->tx_queue_len; + adapter->tx_timeout_factor = 1; + if (adapter->link_duplex == HALF_DUPLEX) { + switch (adapter->link_speed) { + case SPEED_10: + netdev->tx_queue_len = 10; + adapter->tx_timeout_factor = 8; + break; + case SPEED_100: + netdev->tx_queue_len = 100; + break; + } + } + netif_carrier_on(netdev); netif_wake_queue(netdev); mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ); @@ -2343,6 +2397,7 @@ e1000_tso(struct e1000_adapter *adapter, uint16_t ipcse = 0, tucse, mss; uint8_t ipcss, ipcso, tucss, tucso, hdr_len; int err; + struct e1000_buffer *buffer_info; if(skb_shinfo(skb)->tso_size) { if (skb_header_cloned(skb)) { @@ -2387,6 +2434,7 @@ e1000_tso(struct e1000_adapter *adapter, i = tx_ring->next_to_use; context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; context_desc->lower_setup.ip_fields.ipcss = ipcss; context_desc->lower_setup.ip_fields.ipcso = ipcso; @@ -2398,6 +2452,8 @@ e1000_tso(struct e1000_adapter *adapter, context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; context_desc->cmd_and_length = cpu_to_le32(cmd_length); + buffer_info->time_stamp = jiffies; + if (++i == tx_ring->count) i = 0; tx_ring->next_to_use = i; @@ -2415,11 +2469,13 @@ e1000_tx_csum(struct e1000_adapter *adap struct e1000_context_desc *context_desc; unsigned int i; uint8_t css; + struct e1000_buffer *buffer_info; if(likely(skb->ip_summed == CHECKSUM_HW)) { css = skb->h.raw - skb->data; i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; context_desc = E1000_CONTEXT_DESC(*tx_ring, i); context_desc->upper_setup.tcp_fields.tucss = css; @@ -2428,6 +2480,8 @@ e1000_tx_csum(struct e1000_adapter *adap context_desc->tcp_seg_setup.data = 0; context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); + buffer_info->time_stamp = jiffies; + if (unlikely(++i == tx_ring->count)) i = 0; tx_ring->next_to_use = i; @@ -2651,19 +2695,7 @@ e1000_transfer_dhcp_info(struct e1000_ad E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) ) return 0; } - if(htons(ETH_P_IP) == skb->protocol) { - const struct iphdr *ip = skb->nh.iph; - if(IPPROTO_UDP == ip->protocol) { - struct udphdr *udp = (struct udphdr *)(skb->h.uh); - if(ntohs(udp->dest) == 67) { - offset = (uint8_t *)udp + 8 - skb->data; - length = skb->len - offset; - - return e1000_mng_write_dhcp_info(hw, - (uint8_t *)udp + 8, length); - } - } - } else if((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { + if ((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) { struct ethhdr *eth = (struct ethhdr *) skb->data; if((htons(ETH_P_IP) == eth->h_proto)) { const struct iphdr *ip = @@ -2717,7 +2759,26 @@ e1000_xmit_frame(struct sk_buff *skb, st #ifdef NETIF_F_TSO mss = skb_shinfo(skb)->tso_size; - /* The controller does a simple calculation to + /* TSO Workaround for 82571/2 Controllers -- if skb->data + * points to just header, pull a few bytes of payload from + * frags into skb->data */ + if (mss) { + uint8_t hdr_len; + hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2)); + if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) && + (adapter->hw.mac_type == e1000_82571 || + adapter->hw.mac_type == e1000_82572)) { + unsigned int pull_size; + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + printk(KERN_ERR "__pskb_pull_tail failed.\n"); + dev_kfree_skb_any(skb); + return -EFAULT; + } + } + } + + /* The controller does a simple 
 	 * make sure there is enough room in the FIFO before
 	 * initiating the DMA for each buffer. The calc is:
 	 * 4 = ceil(buffer len/mss). To make sure we don't
@@ -2728,6 +2789,7 @@ e1000_xmit_frame(struct sk_buff *skb, st
 		max_txd_pwr = fls(max_per_txd) - 1;
 	}
 
+	/* reserve a descriptor for the offload context */
 	if((mss) || (skb->ip_summed == CHECKSUM_HW))
 		count++;
 	count++;
@@ -2762,26 +2798,6 @@ e1000_xmit_frame(struct sk_buff *skb, st
 	if(adapter->pcix_82544)
 		count += nr_frags;
 
-#ifdef NETIF_F_TSO
-	/* TSO Workaround for 82571/2 Controllers -- if skb->data
-	 * points to just header, pull a few bytes of payload from
-	 * frags into skb->data */
-	if (skb_shinfo(skb)->tso_size) {
-		uint8_t hdr_len;
-		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
-		if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) &&
-		    (adapter->hw.mac_type == e1000_82571 ||
-		     adapter->hw.mac_type == e1000_82572)) {
-			unsigned int pull_size;
-			pull_size = min((unsigned int)4, skb->data_len);
-			if (!__pskb_pull_tail(skb, pull_size)) {
-				printk(KERN_ERR "__pskb_pull_tail failed.\n");
-				dev_kfree_skb_any(skb);
-				return -EFAULT;
-			}
-		}
-	}
-#endif
 	if(adapter->hw.tx_pkt_filtering &&
 	   (adapter->hw.mac_type == e1000_82573) )
 		e1000_transfer_dhcp_info(adapter, skb);
@@ -2869,6 +2911,7 @@ e1000_tx_timeout_task(struct net_device
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
+	adapter->tx_timeout_count++;
 	e1000_down(adapter);
 	e1000_up(adapter);
 }
@@ -2886,7 +2929,7 @@ e1000_get_stats(struct net_device *netde
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	e1000_update_stats(adapter);
+	/* only return the current stats */
 	return &adapter->net_stats;
 }
@@ -3079,7 +3122,6 @@ e1000_update_stats(struct e1000_adapter
 	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
 	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
 	adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
-	adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
 	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
 
 	/* Tx Errors */
@@ -3285,11 +3328,8 @@ e1000_clean_tx_irq(struct e1000_adapter
 			cleaned = (i == eop);
 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+			memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
 
-			tx_desc->buffer_addr = 0;
-			tx_desc->lower.data = 0;
-			tx_desc->upper.data = 0;
-
 			if(unlikely(++i == tx_ring->count)) i = 0;
 		}
@@ -3785,6 +3757,7 @@ e1000_alloc_rx_buffers(struct e1000_adap
 
 		if(unlikely(!skb)) {
 			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
 			break;
 		}
@@ -3896,8 +3862,10 @@ e1000_alloc_rx_buffers_ps(struct e1000_a
 			if (likely(!ps_page->ps_page[j])) {
 				ps_page->ps_page[j] =
 					alloc_page(GFP_ATOMIC);
-				if (unlikely(!ps_page->ps_page[j]))
+				if (unlikely(!ps_page->ps_page[j])) {
+					adapter->alloc_rx_buff_failed++;
 					goto no_buffers;
+				}
 				ps_page_dma->ps_page_dma[j] =
 					pci_map_page(pdev,
 						     ps_page->ps_page[j],
@@ -3916,8 +3958,10 @@ e1000_alloc_rx_buffers_ps(struct e1000_a
 
 		skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
 
-		if(unlikely(!skb))
+		if (unlikely(!skb)) {
+			adapter->alloc_rx_buff_failed++;
 			break;
+		}
 
 		/* Make buffer alignment 2 beyond a 16 byte boundary
 		 * this will result in a 16 byte aligned IP header after
@@ -4405,6 +4449,11 @@ e1000_suspend(struct pci_dev *pdev, pm_m
 				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
 		break;
 	case e1000_82573:
+		/* If f/w is in AMT mode, DRV_LOAD bit is cleared in
+		   e1000_close. So no need to reset it here. */
+		if (e1000_check_mng_mode(&adapter->hw))
+			break;
+
 		swsm = E1000_READ_REG(&adapter->hw, SWSM);
 		E1000_WRITE_REG(&adapter->hw, SWSM,
 				swsm & ~E1000_SWSM_DRV_LOAD);
@@ -4458,6 +4507,11 @@ e1000_resume(struct pci_dev *pdev)
 				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
 		break;
 	case e1000_82573:
+		/* Do not set DRV_LOAD if f/w is in AMT mode.
+		   It will eventually be set when i/f is opened. */
+		if (e1000_check_mng_mode(&adapter->hw))
+			break;
+
 		swsm = E1000_READ_REG(&adapter->hw, SWSM);
 		E1000_WRITE_REG(&adapter->hw, SWSM,
 				swsm | E1000_SWSM_DRV_LOAD);
diff -up linux-2.6/drivers/net/e1000/e1000_param.c linux-2.6.new/drivers/net/e1000/e1000_param.c
--- linux-2.6/drivers/net/e1000/e1000_param.c	2005-11-14 16:20:34.000000000 -0800
+++ linux-2.6.new/drivers/net/e1000/e1000_param.c	2005-11-04 01:23:41.000000000 -0800
@@ -584,7 +584,13 @@ e1000_check_copper_options(struct e1000_
 		                         .p = dplx_list }}
 		};
 
-	if (num_Duplex > bd) {
+	if (e1000_check_phy_reset_block(&adapter->hw)) {
+		DPRINTK(PROBE, INFO,
+			"Link active due to SoL/IDER Session. "
+			"Speed/Duplex/AutoNeg parameter ignored.\n");
+		return;
+	}
+	if(num_Duplex > bd) {
 		dplx = Duplex[bd];
 		e1000_validate_option(&dplx, &opt, adapter);
 	} else {

Cheers,
Jeff
-
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html