diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 44ba522..7a08288 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -27,6 +27,7 @@
 *******************************************************************************/
 
 #include "e1000.h"
+#include <net/ip6_checksum.h>
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -35,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "7.2.9-k4"DRIVERNAPI
+#define DRV_VERSION "7.3.15-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -103,6 +104,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
        INTEL_E1000_ETHERNET_DEVICE(0x10B9),
        INTEL_E1000_ETHERNET_DEVICE(0x10BA),
        INTEL_E1000_ETHERNET_DEVICE(0x10BB),
+       INTEL_E1000_ETHERNET_DEVICE(0x10BC),
+       INTEL_E1000_ETHERNET_DEVICE(0x10C4),
+       INTEL_E1000_ETHERNET_DEVICE(0x10C5),
        /* required last entry */
        {0,}
 };
@@ -154,6 +158,9 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data);
+#ifdef CONFIG_PCI_MSI
+static irqreturn_t e1000_intr_msi(int irq, void *data);
+#endif
 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
                                     struct e1000_tx_ring *tx_ring);
 #ifdef CONFIG_E1000_NAPI
@@ -285,7 +292,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
 
        flags = IRQF_SHARED;
 #ifdef CONFIG_PCI_MSI
-       if (adapter->hw.mac_type > e1000_82547_rev_2) {
+       if (adapter->hw.mac_type >= e1000_82571) {
                adapter->have_msi = TRUE;
                if ((err = pci_enable_msi(adapter->pdev))) {
                        DPRINTK(PROBE, ERR,
@@ -293,8 +300,14 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
                        adapter->have_msi = FALSE;
                }
        }
-       if (adapter->have_msi)
+       if (adapter->have_msi) {
                flags &= ~IRQF_SHARED;
+               err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
+                                 netdev->name, netdev);
+               if (err)
+                       DPRINTK(PROBE, ERR,
+                              "Unable to allocate interrupt Error: %d\n", err);
+       } else
 #endif
        if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
                               netdev->name, netdev)))
@@ -961,6 +974,7 @@ e1000_probe(struct pci_dev *pdev,
                break;
        case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
        case E1000_DEV_ID_82571EB_QUAD_COPPER:
+       case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
                /* if quad port adapter, disable WoL on all but port A */
                if (global_quad_port_a != 0)
                        adapter->eeprom_wol = 0;
@@ -1576,8 +1590,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
        e1000_config_collision_dist(hw);
 
        /* Setup Transmit Descriptor Settings for eop descriptor */
-       adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
-               E1000_TXD_CMD_IFCS;
+       adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+       /* only set IDE if we are delaying interrupts using the timers */
+       if (adapter->tx_int_delay)
+               adapter->txd_cmd |= E1000_TXD_CMD_IDE;
 
        if (hw->mac_type < e1000_82543)
                adapter->txd_cmd |= E1000_TXD_CMD_RPS;
@@ -1881,7 +1898,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 
        if (hw->mac_type >= e1000_82540) {
                E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
-               if (adapter->itr > 1)
+               if (adapter->itr_setting != 0)
                        E1000_WRITE_REG(hw, ITR,
                                1000000000 / (adapter->itr * 256));
        }
@@ -1891,11 +1908,11 @@ e1000_configure_rx(struct e1000_adapter *adapter)
                /* Reset delay timers after every interrupt */
                ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
 #ifdef CONFIG_E1000_NAPI
-               /* Auto-Mask interrupts upon ICR read. */
+               /* Auto-Mask interrupts upon ICR access */
                ctrl_ext |= E1000_CTRL_EXT_IAME;
+               E1000_WRITE_REG(hw, IAM, 0xffffffff);
 #endif
                E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
-               E1000_WRITE_REG(hw, IAM, ~0);
                E1000_WRITE_FLUSH(hw);
        }
 
@@ -1993,10 +2010,13 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
                                buffer_info->dma,
                                buffer_info->length,
                                PCI_DMA_TODEVICE);
+               buffer_info->dma = 0;
        }
-       if (buffer_info->skb)
+       if (buffer_info->skb) {
                dev_kfree_skb_any(buffer_info->skb);
-       memset(buffer_info, 0, sizeof(struct e1000_buffer));
+               buffer_info->skb = NULL;
+       }
+       /* buffer_info must be completely set up in the transmit path */
 }
 
 /**
@@ -2557,19 +2577,6 @@ e1000_watchdog(unsigned long data)
                }
        }
 
-       /* Dynamic mode for Interrupt Throttle Rate (ITR) */
-       if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
-               /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
-                * asymmetrical Tx or Rx gets ITR=8000; everyone
-                * else is between 2000-8000. */
-               uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
-               uint32_t dif = (adapter->gotcl > adapter->gorcl ?
-                       adapter->gotcl - adapter->gorcl :
-                       adapter->gorcl - adapter->gotcl) / 10000;
-               uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
-               E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
-       }
-
        /* Cause software interrupt to ensure rx ring is cleaned */
        E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
 
@@ -2585,6 +2592,135 @@ e1000_watchdog(unsigned long data)
        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
 }
 
+enum latency_range {
+       lowest_latency = 0,
+       low_latency = 1,
+       bulk_latency = 2,
+       latency_invalid = 255
+};
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ *      Stores a new ITR value based on packets and byte
+ *      counts during the last interrupt.  The advantage of per interrupt
+ *      computation is faster updates and more accurate ITR for the current
+ *      traffic pattern.  Constants in this function were computed
+ *      based on theoretical maximum wire speed and thresholds were set based
+ *      on testing data as well as attempting to minimize response time
+ *      while increasing bulk throughput.
+ *      this functionality is controlled by the InterruptThrottleRate module
+ *      parameter (see e1000_param.c)
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ **/
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+                                   uint16_t itr_setting,
+                                   int packets,
+                                   int bytes)
+{
+       unsigned int retval = itr_setting;
+       struct e1000_hw *hw = &adapter->hw;
+
+       if (unlikely(hw->mac_type < e1000_82540))
+               goto update_itr_done;
+
+       if (packets == 0)
+               goto update_itr_done;
+
+
+       switch (itr_setting) {
+       case lowest_latency:
+               if ((packets < 5) && (bytes > 512))
+                       retval = low_latency;
+               break;
+       case low_latency:  /* 50 usec aka 20000 ints/s */
+               if (bytes > 10000) {
+                       if ((packets < 10) ||
+                            ((bytes/packets) > 1200))
+                               retval = bulk_latency;
+                       else if ((packets > 35))
+                               retval = lowest_latency;
+               } else if (packets <= 2 && bytes < 512)
+                       retval = lowest_latency;
+               break;
+       case bulk_latency: /* 250 usec aka 4000 ints/s */
+               if (bytes > 25000) {
+                       if (packets > 35)
+                               retval = low_latency;
+               } else {
+                       if (bytes < 6000)
+                               retval = low_latency;
+               }
+               break;
+       }
+
+update_itr_done:
+       return retval;
+}
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       uint16_t current_itr;
+       uint32_t new_itr = adapter->itr;
+
+       if (unlikely(hw->mac_type < e1000_82540))
+               return;
+
+       /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+       if (unlikely(adapter->link_speed != SPEED_1000)) {
+               current_itr = 0;
+               new_itr = 4000;
+               goto set_itr_now;
+       }
+
+       adapter->tx_itr = e1000_update_itr(adapter,
+                                   adapter->tx_itr,
+                                   adapter->total_tx_packets,
+                                   adapter->total_tx_bytes);
+       adapter->rx_itr = e1000_update_itr(adapter,
+                                   adapter->rx_itr,
+                                   adapter->total_rx_packets,
+                                   adapter->total_rx_bytes);
+
+       current_itr = max(adapter->rx_itr, adapter->tx_itr);
+
+       /* conservative mode eliminates the lowest_latency setting */
+       if (current_itr == lowest_latency && (adapter->itr_setting == 3))
+               current_itr = low_latency;
+
+       switch (current_itr) {
+       /* counts and packets in update_itr are dependent on these numbers */
+       case lowest_latency:
+               new_itr = 70000;
+               break;
+       case low_latency:
+               new_itr = 20000; /* aka hwitr = ~200 */
+               break;
+       case bulk_latency:
+               new_itr = 4000;
+               break;
+       default:
+               break;
+       }
+
+set_itr_now:
+       if (new_itr != adapter->itr) {
+               /* this attempts to bias the interrupt rate towards Bulk
+                * by adding intermediate steps when interrupt rate is
+                * increasing */
+               new_itr = new_itr > adapter->itr ?
+                            min(adapter->itr + (new_itr >> 2), new_itr) :
+                            new_itr;
+               adapter->itr = new_itr;
+               E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256));
+       }
+
+       return;
+}
+
 #define E1000_TX_FLAGS_CSUM            0x00000001
 #define E1000_TX_FLAGS_VLAN            0x00000002
 #define E1000_TX_FLAGS_TSO             0x00000004
@@ -2661,6 +2797,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                context_desc->cmd_and_length = cpu_to_le32(cmd_length);
 
                buffer_info->time_stamp = jiffies;
+               buffer_info->next_to_watch = i;
 
                if (++i == tx_ring->count) i = 0;
                tx_ring->next_to_use = i;
@@ -2695,6 +2832,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
 
                buffer_info->time_stamp = jiffies;
+               buffer_info->next_to_watch = i;
 
                if (unlikely(++i == tx_ring->count)) i = 0;
                tx_ring->next_to_use = i;
@@ -2763,6 +2901,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                                size,
                                PCI_DMA_TODEVICE);
                buffer_info->time_stamp = jiffies;
+               buffer_info->next_to_watch = i;
 
                len -= size;
                offset += size;
@@ -2802,6 +2941,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                                        size,
                                        PCI_DMA_TODEVICE);
                        buffer_info->time_stamp = jiffies;
+                       buffer_info->next_to_watch = i;
 
                        len -= size;
                        offset += size;
@@ -2963,6 +3103,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
 
        /* A reprieve! */
        netif_start_queue(netdev);
+       ++adapter->restart_queue;
        return 0;
 }
 
@@ -3451,6 +3592,95 @@ e1000_update_stats(struct e1000_adapter *adapter)
 
        spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
+#ifdef CONFIG_PCI_MSI
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+
+static
+irqreturn_t e1000_intr_msi(int irq, void *data)
+{
+       struct net_device *netdev = data;
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+#ifndef CONFIG_E1000_NAPI
+       int i;
+#endif
+
+       /* this code avoids the read of ICR but has to get 1000 interrupts
+        * at every link change event before it will notice the change */
+       if (++adapter->detect_link >= 1000) {
+               uint32_t icr = E1000_READ_REG(hw, ICR);
+#ifdef CONFIG_E1000_NAPI
+               /* read ICR disables interrupts using IAM, so keep up with our
+                * enable/disable accounting */
+               atomic_inc(&adapter->irq_sem);
+#endif
+               adapter->detect_link = 0;
+               if ((icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) &&
+                   (icr & E1000_ICR_INT_ASSERTED)) {
+                       hw->get_link_status = 1;
+                       /* 80003ES2LAN workaround--
+                       * For packet buffer work-around on link down event;
+                       * disable receives here in the ISR and
+                       * reset adapter in watchdog
+                       */
+                       if (netif_carrier_ok(netdev) &&
+                           (adapter->hw.mac_type == e1000_80003es2lan)) {
+                               /* disable receives */
+                               uint32_t rctl = E1000_READ_REG(hw, RCTL);
+                               E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+                       }
+                       /* guard against interrupt when we're going down */
+                       if (!test_bit(__E1000_DOWN, &adapter->flags))
+                               mod_timer(&adapter->watchdog_timer,
+                                         jiffies + 1);
+               }
+       } else {
+               E1000_WRITE_REG(hw, ICR, (0xffffffff & ~(E1000_ICR_RXSEQ |
+                                                        E1000_ICR_LSC)));
+               /* bummer we have to flush here, but things break otherwise as
+                * some event appears to be lost or delayed and throughput
+                * drops.  In almost all tests this flush is un-necessary */
+               E1000_WRITE_FLUSH(hw);
+#ifdef CONFIG_E1000_NAPI
+               /* Interrupt Auto-Mask (IAM)...upon writing ICR, interrupts are
+                * masked.  No need for the IMC write, but it does mean we
+                * should account for it ASAP. */
+               atomic_inc(&adapter->irq_sem);
+#endif
+       }
+
+#ifdef CONFIG_E1000_NAPI
+       if (likely(netif_rx_schedule_prep(netdev))) {
+               adapter->total_tx_bytes = 0;
+               adapter->total_tx_packets = 0;
+               adapter->total_rx_bytes = 0;
+               adapter->total_rx_packets = 0;
+               __netif_rx_schedule(netdev);
+       } else
+               e1000_irq_enable(adapter);
+#else
+       adapter->total_tx_bytes = 0;
+       adapter->total_rx_bytes = 0;
+       adapter->total_tx_packets = 0;
+       adapter->total_rx_packets = 0;
+
+       for (i = 0; i < E1000_MAX_INTR; i++)
+               if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
+                  !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+                       break;
+
+       if (likely(adapter->itr_setting & 3))
+               e1000_set_itr(adapter);
+#endif
+
+       return IRQ_HANDLED;
+}
+#endif
 
 /**
  * e1000_intr - Interrupt Handler
@@ -3467,7 +3697,17 @@ e1000_intr(int irq, void *data)
        uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
 #ifndef CONFIG_E1000_NAPI
        int i;
-#else
+#endif
+       if (unlikely(!icr))
+               return IRQ_NONE;  /* Not our interrupt */
+
+#ifdef CONFIG_E1000_NAPI
+       /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+        * not set, then the adapter didn't send an interrupt */
+       if (unlikely(hw->mac_type >= e1000_82571 &&
+                    !(icr & E1000_ICR_INT_ASSERTED)))
+               return IRQ_NONE;
+
        /* Interrupt Auto-Mask...upon reading ICR,
         * interrupts are masked.  No need for the
         * IMC write, but it does mean we should
@@ -3476,14 +3716,6 @@ e1000_intr(int irq, void *data)
                atomic_inc(&adapter->irq_sem);
 #endif
 
-       if (unlikely(!icr)) {
-#ifdef CONFIG_E1000_NAPI
-               if (hw->mac_type >= e1000_82571)
-                       e1000_irq_enable(adapter);
-#endif
-               return IRQ_NONE;  /* Not our interrupt */
-       }
-
        if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
                hw->get_link_status = 1;
                /* 80003ES2LAN workaround--
@@ -3504,13 +3736,18 @@ e1000_intr(int irq, void *data)
 
 #ifdef CONFIG_E1000_NAPI
        if (unlikely(hw->mac_type < e1000_82571)) {
+               /* disable interrupts, without the synchronize_irq bit */
                atomic_inc(&adapter->irq_sem);
                E1000_WRITE_REG(hw, IMC, ~0);
                E1000_WRITE_FLUSH(hw);
        }
-       if (likely(netif_rx_schedule_prep(netdev)))
+       if (likely(netif_rx_schedule_prep(netdev))) {
+               adapter->total_tx_bytes = 0;
+               adapter->total_tx_packets = 0;
+               adapter->total_rx_bytes = 0;
+               adapter->total_rx_packets = 0;
                __netif_rx_schedule(netdev);
-       else
+       } else
                /* this really should not happen! if it does it is basically a
                 * bug, but not a hard error, so enable ints and continue */
                e1000_irq_enable(adapter);
@@ -3530,11 +3767,19 @@ e1000_intr(int irq, void *data)
                E1000_WRITE_REG(hw, IMC, ~0);
        }
 
+       adapter->total_tx_bytes = 0;
+       adapter->total_rx_bytes = 0;
+       adapter->total_tx_packets = 0;
+       adapter->total_rx_packets = 0;
+
        for (i = 0; i < E1000_MAX_INTR; i++)
                if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
                   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
                        break;
 
+       if (likely(adapter->itr_setting & 3))
+               e1000_set_itr(adapter);
+
        if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
                e1000_irq_enable(adapter);
 
@@ -3582,6 +3827,8 @@ e1000_clean(struct net_device *poll_dev, int *budget)
        if ((!tx_cleaned && (work_done == 0)) ||
           !netif_running(poll_dev)) {
 quit_polling:
+               if (likely(adapter->itr_setting & 3))
+                       e1000_set_itr(adapter);
                netif_rx_complete(poll_dev);
                e1000_irq_enable(adapter);
                return 0;
@@ -3608,6 +3855,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
        unsigned int count = 0;
 #endif
        boolean_t cleaned = FALSE;
+       unsigned int total_tx_bytes=0, total_tx_packets=0;
 
        i = tx_ring->next_to_clean;
        eop = tx_ring->buffer_info[i].next_to_watch;
@@ -3619,8 +3867,15 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
                        buffer_info = &tx_ring->buffer_info[i];
                        cleaned = (i == eop);
 
+                       if (cleaned) {
+                               /* this packet count is wrong for TSO but has a
+                                * tendency to make dynamic ITR change more
+                                * towards bulk */
+                               total_tx_packets++;
+                               total_tx_bytes += buffer_info->skb->len;
+                       }
                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
-                       memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
+                       tx_desc->upper.data = 0;
 
                        if (unlikely(++i == tx_ring->count)) i = 0;
                }
@@ -3643,8 +3898,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
                 * sees the new next_to_clean.
                 */
                smp_mb();
-               if (netif_queue_stopped(netdev))
+               if (netif_queue_stopped(netdev)) {
                        netif_wake_queue(netdev);
+                       ++adapter->restart_queue;
+               }
        }
 
        if (adapter->detect_tx_hung) {
@@ -3682,6 +3939,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
                        netif_stop_queue(netdev);
                }
        }
+       adapter->total_tx_bytes += total_tx_bytes;
+       adapter->total_tx_packets += total_tx_packets;
        return cleaned;
 }
 
@@ -3761,6 +4020,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
        unsigned int i;
        int cleaned_count = 0;
        boolean_t cleaned = FALSE;
+       unsigned int total_rx_bytes=0, total_rx_packets=0;
 
        i = rx_ring->next_to_clean;
        rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -3827,6 +4087,10 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
                 * done after the TBI_ACCEPT workaround above */
                length -= 4;
 
+               /* probably a little skewed due to removing CRC */
+               total_rx_bytes += length;
+               total_rx_packets++;
+
                /* code added for copybreak, this should improve
                 * performance for small packets with large amounts
                 * of reassembly being done in the stack */
@@ -3895,6 +4159,8 @@ next_desc:
        if (cleaned_count)
                adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 
+       adapter->total_rx_packets += total_rx_packets;
+       adapter->total_rx_bytes += total_rx_bytes;
        return cleaned;
 }
 
@@ -3924,6 +4190,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
        uint32_t length, staterr;
        int cleaned_count = 0;
        boolean_t cleaned = FALSE;
+       unsigned int total_rx_bytes=0, total_rx_packets=0;
 
        i = rx_ring->next_to_clean;
        rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
@@ -4028,6 +4295,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
                pskb_trim(skb, skb->len - 4);
 
 copydone:
+               total_rx_bytes += skb->len;
+               total_rx_packets++;
+
                e1000_rx_checksum(adapter, staterr,
                                  le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
                skb->protocol = eth_type_trans(skb, netdev);
@@ -4076,6 +4346,8 @@ next_desc:
        if (cleaned_count)
                adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
 
+       adapter->total_rx_packets += total_rx_packets;
+       adapter->total_rx_bytes += total_rx_bytes;
        return cleaned;
 }