Replace the local variables left over from the former padding
solution with direct skb->len/skb->data accesses, as they are not
needed anymore. Update dev->stats.tx_bytes before calling
dev_consume_skb_any() so that skb->len is not read after the skb
has been released.
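
For reference, a minimal sketch of the resulting tail of
__ei_start_xmit() (excerpt only, not the complete function), showing
that skb->len is read before the skb is handed back:

        spin_unlock(&ei_local->page_lock);
        enable_irq_lockdep_irqrestore(dev->irq, &flags);
        skb_tx_timestamp(skb);
        /* Read skb->len while the skb is still owned here; the skb
         * must not be touched after dev_consume_skb_any().
         */
        dev->stats.tx_bytes += skb->len;
        dev_consume_skb_any(skb);

        return NETDEV_TX_OK;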

Signed-off-by: Armin Wolf <w_ar...@gmx.de>
---
 drivers/net/ethernet/8390/lib8390.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index b3499714f7e0..47e2962eff56 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -305,17 +305,14 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
 {
        unsigned long e8390_base = dev->base_addr;
        struct ei_device *ei_local = netdev_priv(dev);
-       int send_length, output_page;
+       int output_page;
        unsigned long flags;
-       char *data;

        /* The Hardware does not pad undersized frames */
        if (eth_skb_pad(skb)) {
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }
-       data = skb->data;
-       send_length = skb->len;

        /* Mask interrupts from the ethercard.
           SMP: We have to grab the lock here otherwise the IRQ handler
@@ -347,7 +344,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,

        if (ei_local->tx1 == 0) {
                output_page = ei_local->tx_start_page;
-               ei_local->tx1 = send_length;
+               ei_local->tx1 = skb->len;
                if ((netif_msg_tx_queued(ei_local)) &&
                    ei_local->tx2 > 0)
                        netdev_dbg(dev,
@@ -355,7 +352,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
                                   ei_local->tx2, ei_local->lasttx, ei_local->txing);
        } else if (ei_local->tx2 == 0) {
                output_page = ei_local->tx_start_page + TX_PAGES/2;
-               ei_local->tx2 = send_length;
+               ei_local->tx2 = skb->len;
                if ((netif_msg_tx_queued(ei_local)) &&
                    ei_local->tx1 > 0)
                        netdev_dbg(dev,
@@ -380,11 +377,11 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
         * trigger the send later, upon receiving a Tx done interrupt.
         */

-       ei_block_output(dev, send_length, data, output_page);
+       ei_block_output(dev, skb->len, skb->data, output_page);

        if (!ei_local->txing) {
                ei_local->txing = 1;
-               NS8390_trigger_send(dev, send_length, output_page);
+               NS8390_trigger_send(dev, skb->len, output_page);
                if (output_page == ei_local->tx_start_page) {
                        ei_local->tx1 = -1;
                        ei_local->lasttx = -1;
@@ -407,8 +404,8 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
        spin_unlock(&ei_local->page_lock);
        enable_irq_lockdep_irqrestore(dev->irq, &flags);
        skb_tx_timestamp(skb);
+       dev->stats.tx_bytes += skb->len;
        dev_consume_skb_any(skb);
-       dev->stats.tx_bytes += send_length;

        return NETDEV_TX_OK;
 }
--
2.20.1
