Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
author Linus Torvalds <torvalds@woody.linux-foundation.org>
Wed, 6 Feb 2008 18:47:18 +0000 (10:47 -0800)
committer Linus Torvalds <torvalds@woody.linux-foundation.org>
Wed, 6 Feb 2008 18:47:18 +0000 (10:47 -0800)
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (35 commits)
  virtio net: fix oops on interface-up
  Fix PHY Lib support for gianfar and ucc_geth
  forcedeth: preserve registers
  forcedeth: phy status fix
  forcedeth: restart tx/rx
  ipvs: Make wrr "no available servers" error message rate-limited
  [PPPOL2TP]: Label unused warning when CONFIG_PROC_FS is not set.
  [NET_SCHED]: cls_flow: support classification based on VLAN tag
  [VLAN]: Constify skb argument to vlan_get_tag()
  [NET_SCHED]: cls_flow: fix key mask validity check
  [NET_SCHED]: em_meta: fix compile warning
  b43: Fix DMA for 30/32-bit DMA engines
  b43: fix build with CONFIG_SSB_PCIHOST=n
  mac80211: Is not EXPERIMENTAL anymore
  iwl3945-base.c: fix off-by-one errors
  b43legacy: fix DMA slot resource leakage
  b43legacy: drop packets we are not able to encrypt
  b43legacy: fix suspend/resume
  b43legacy: fix PIO crash
  Generic HDLC - use random_ether_addr()
  ...

37 files changed:
drivers/net/forcedeth.c
drivers/net/gianfar_mii.c
drivers/net/iseries_veth.c
drivers/net/ixgbe/ixgbe.h
drivers/net/ixgbe/ixgbe_ethtool.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/mv643xx_eth.c
drivers/net/pppol2tp.c
drivers/net/sky2.c
drivers/net/sky2.h
drivers/net/tlan.c
drivers/net/tulip/xircom_cb.c
drivers/net/ucc_geth_mii.c
drivers/net/virtio_net.c
drivers/net/wan/hdlc.c
drivers/net/wan/hdlc_cisco.c
drivers/net/wan/hdlc_fr.c
drivers/net/wan/hdlc_ppp.c
drivers/net/wan/hdlc_raw.c
drivers/net/wan/hdlc_raw_eth.c
drivers/net/wan/hdlc_x25.c
drivers/net/wireless/b43/dma.c
drivers/net/wireless/b43/dma.h
drivers/net/wireless/b43legacy/dma.c
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/b43legacy/pio.c
drivers/net/wireless/b43legacy/xmit.c
drivers/net/wireless/b43legacy/xmit.h
drivers/net/wireless/iwlwifi/iwl3945-base.c
include/linux/hdlc.h
include/linux/if_vlan.h
include/linux/pkt_cls.h
include/linux/ssb/ssb.h
net/ipv4/ipvs/ip_vs_wrr.c
net/mac80211/Kconfig
net/sched/cls_flow.c
net/sched/em_meta.c

diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3634223..d4843d0 100644
@@ -323,8 +323,8 @@ enum {
        NvRegMIIStatus = 0x180,
 #define NVREG_MIISTAT_ERROR            0x0001
 #define NVREG_MIISTAT_LINKCHANGE       0x0008
-#define NVREG_MIISTAT_MASK             0x000f
-#define NVREG_MIISTAT_MASK2            0x000f
+#define NVREG_MIISTAT_MASK_RW          0x0007
+#define NVREG_MIISTAT_MASK_ALL         0x000f
        NvRegMIIMask = 0x184,
 #define NVREG_MII_LINKCHANGE           0x0008
 
@@ -624,6 +624,9 @@ union ring_type {
 #define NV_MSI_X_VECTOR_TX    0x1
 #define NV_MSI_X_VECTOR_OTHER 0x2
 
+#define NV_RESTART_TX         0x1
+#define NV_RESTART_RX         0x2
+
 /* statistics */
 struct nv_ethtool_str {
        char name[ETH_GSTRING_LEN];
@@ -1061,7 +1064,7 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
        u32 reg;
        int retval;
 
-       writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+       writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);
 
        reg = readl(base + NvRegMIIControl);
        if (reg & NVREG_MIICTL_INUSE) {
@@ -1432,16 +1435,30 @@ static void nv_mac_reset(struct net_device *dev)
 {
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);
+       u32 temp1, temp2, temp3;
 
        dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
+
        writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
        pci_push(base);
+
+       /* save registers since they will be cleared on reset */
+       temp1 = readl(base + NvRegMacAddrA);
+       temp2 = readl(base + NvRegMacAddrB);
+       temp3 = readl(base + NvRegTransmitPoll);
+
        writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
        pci_push(base);
        udelay(NV_MAC_RESET_DELAY);
        writel(0, base + NvRegMacReset);
        pci_push(base);
        udelay(NV_MAC_RESET_DELAY);
+
+       /* restore saved registers */
+       writel(temp1, base + NvRegMacAddrA);
+       writel(temp2, base + NvRegMacAddrB);
+       writel(temp3, base + NvRegTransmitPoll);
+
        writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
        pci_push(base);
 }
@@ -2767,6 +2784,7 @@ static int nv_update_linkspeed(struct net_device *dev)
        int mii_status;
        int retval = 0;
        u32 control_1000, status_1000, phyreg, pause_flags, txreg;
+       u32 txrxFlags = 0;
 
        /* BMSR_LSTATUS is latched, read it twice:
         * we want the current value.
@@ -2862,6 +2880,16 @@ set_speed:
        np->duplex = newdup;
        np->linkspeed = newls;
 
+       /* The transmitter and receiver must be restarted for safe update */
+       if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
+               txrxFlags |= NV_RESTART_TX;
+               nv_stop_tx(dev);
+       }
+       if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
+               txrxFlags |= NV_RESTART_RX;
+               nv_stop_rx(dev);
+       }
+
        if (np->gigabit == PHY_GIGABIT) {
                phyreg = readl(base + NvRegRandomSeed);
                phyreg &= ~(0x3FF00);
@@ -2950,6 +2978,11 @@ set_speed:
        }
        nv_update_pause(dev, pause_flags);
 
+       if (txrxFlags & NV_RESTART_TX)
+               nv_start_tx(dev);
+       if (txrxFlags & NV_RESTART_RX)
+               nv_start_rx(dev);
+
        return retval;
 }
 
@@ -2976,7 +3009,7 @@ static void nv_link_irq(struct net_device *dev)
        u32 miistat;
 
        miistat = readl(base + NvRegMIIStatus);
-       writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+       writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
        dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
 
        if (miistat & (NVREG_MIISTAT_LINKCHANGE))
@@ -4851,7 +4884,7 @@ static int nv_open(struct net_device *dev)
 
        writel(0, base + NvRegMIIMask);
        writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
-       writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
+       writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
 
        writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
        writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
@@ -4889,7 +4922,7 @@ static int nv_open(struct net_device *dev)
 
        nv_disable_hw_interrupts(dev, np->irqmask);
        pci_push(base);
-       writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
+       writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
        writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
        pci_push(base);
 
@@ -4912,7 +4945,7 @@ static int nv_open(struct net_device *dev)
        {
                u32 miistat;
                miistat = readl(base + NvRegMIIStatus);
-               writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+               writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
                dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
        }
        /* set linkspeed to invalid value, thus force nv_update_linkspeed
@@ -5280,7 +5313,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                phystate &= ~NVREG_ADAPTCTL_RUNNING;
                writel(phystate, base + NvRegAdapterControl);
        }
-       writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
+       writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
 
        if (id->driver_data & DEV_HAS_MGMT_UNIT) {
                /* management unit running on the mac? */
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 100bf41..6a647d9 100644
@@ -127,7 +127,7 @@ int gfar_mdio_reset(struct mii_bus *bus)
        struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
        unsigned int timeout = PHY_INIT_TIMEOUT;
 
-       spin_lock_bh(&bus->mdio_lock);
+       mutex_lock(&bus->mdio_lock);
 
        /* Reset the management interface */
        gfar_write(&regs->miimcfg, MIIMCFG_RESET);
@@ -140,7 +140,7 @@ int gfar_mdio_reset(struct mii_bus *bus)
                        timeout--)
                cpu_relax();
 
-       spin_unlock_bh(&bus->mdio_lock);
+       mutex_unlock(&bus->mdio_lock);
 
        if(timeout <= 0) {
                printk(KERN_ERR "%s: The MII Bus is stuck!\n",
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 419861c..58d3bb6 100644
@@ -1020,7 +1020,7 @@ static const struct ethtool_ops ops = {
        .get_link = veth_get_link,
 };
 
-static struct net_device * __init veth_probe_one(int vlan,
+static struct net_device *veth_probe_one(int vlan,
                struct vio_dev *vio_dev)
 {
        struct net_device *dev;
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index a021a6e..d0bf206 100644
@@ -136,8 +136,6 @@ struct ixgbe_ring {
        u16 head;
        u16 tail;
 
-       /* To protect race between sender and clean_tx_irq */
-       spinlock_t tx_lock;
 
        struct ixgbe_queue_stats stats;
 
@@ -174,7 +172,6 @@ struct ixgbe_adapter {
        struct vlan_group *vlgrp;
        u16 bd_number;
        u16 rx_buf_len;
-       atomic_t irq_sem;
        struct work_struct reset_task;
 
        /* TX */
@@ -244,6 +241,7 @@ extern const char ixgbe_driver_version[];
 
 extern int ixgbe_up(struct ixgbe_adapter *adapter);
 extern void ixgbe_down(struct ixgbe_adapter *adapter);
+extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
 extern void ixgbe_reset(struct ixgbe_adapter *adapter);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3635344..a119cbd 100644
@@ -103,21 +103,41 @@ static int ixgbe_get_settings(struct net_device *netdev,
                              struct ethtool_cmd *ecmd)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 link_speed = 0;
+       bool link_up;
 
-       ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
-       ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
-       ecmd->port = PORT_FIBRE;
+       ecmd->supported = SUPPORTED_10000baseT_Full;
+       ecmd->autoneg = AUTONEG_ENABLE;
        ecmd->transceiver = XCVR_EXTERNAL;
+       if (hw->phy.media_type == ixgbe_media_type_copper) {
+               ecmd->supported |= (SUPPORTED_1000baseT_Full |
+                                   SUPPORTED_TP | SUPPORTED_Autoneg);
+
+               ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+                       ecmd->advertising |= ADVERTISED_10000baseT_Full;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+                       ecmd->advertising |= ADVERTISED_1000baseT_Full;
+
+               ecmd->port = PORT_TP;
+       } else {
+               ecmd->supported |= SUPPORTED_FIBRE;
+               ecmd->advertising = (ADVERTISED_10000baseT_Full |
+                                    ADVERTISED_FIBRE);
+               ecmd->port = PORT_FIBRE;
+       }
 
-       if (netif_carrier_ok(adapter->netdev)) {
-               ecmd->speed = SPEED_10000;
+       adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up);
+       if (link_up) {
+               ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+                               SPEED_10000 : SPEED_1000;
                ecmd->duplex = DUPLEX_FULL;
        } else {
                ecmd->speed = -1;
                ecmd->duplex = -1;
        }
 
-       ecmd->autoneg = AUTONEG_DISABLE;
        return 0;
 }
 
@@ -125,17 +145,17 @@ static int ixgbe_set_settings(struct net_device *netdev,
                              struct ethtool_cmd *ecmd)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
 
-       if (ecmd->autoneg == AUTONEG_ENABLE ||
-           ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
-               return -EINVAL;
-
-       if (netif_running(adapter->netdev)) {
-               ixgbe_down(adapter);
-               ixgbe_reset(adapter);
-               ixgbe_up(adapter);
-       } else {
-               ixgbe_reset(adapter);
+       switch (hw->phy.media_type) {
+       case ixgbe_media_type_fiber:
+               if ((ecmd->autoneg == AUTONEG_ENABLE) ||
+                   (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+                       return -EINVAL;
+               /* in this case we currently only support 10Gb/FULL */
+               break;
+       default:
+               break;
        }
 
        return 0;
@@ -147,7 +167,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
 
-       pause->autoneg = AUTONEG_DISABLE;
+       pause->autoneg = (hw->fc.type == ixgbe_fc_full ? 1 : 0);
 
        if (hw->fc.type == ixgbe_fc_rx_pause) {
                pause->rx_pause = 1;
@@ -165,10 +185,8 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
 
-       if (pause->autoneg == AUTONEG_ENABLE)
-               return -EINVAL;
-
-       if (pause->rx_pause && pause->tx_pause)
+       if ((pause->autoneg == AUTONEG_ENABLE) ||
+           (pause->rx_pause && pause->tx_pause))
                hw->fc.type = ixgbe_fc_full;
        else if (pause->rx_pause && !pause->tx_pause)
                hw->fc.type = ixgbe_fc_rx_pause;
@@ -176,15 +194,15 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
                hw->fc.type = ixgbe_fc_tx_pause;
        else if (!pause->rx_pause && !pause->tx_pause)
                hw->fc.type = ixgbe_fc_none;
+       else
+               return -EINVAL;
 
        hw->fc.original_type = hw->fc.type;
 
-       if (netif_running(adapter->netdev)) {
-               ixgbe_down(adapter);
-               ixgbe_up(adapter);
-       } else {
+       if (netif_running(netdev))
+               ixgbe_reinit_locked(adapter);
+       else
                ixgbe_reset(adapter);
-       }
 
        return 0;
 }
@@ -203,12 +221,10 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
        else
                adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
 
-       if (netif_running(netdev)) {
-               ixgbe_down(adapter);
-               ixgbe_up(adapter);
-       } else {
+       if (netif_running(netdev))
+               ixgbe_reinit_locked(adapter);
+       else
                ixgbe_reset(adapter);
-       }
 
        return 0;
 }
@@ -662,7 +678,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
                return 0;
        }
 
-       if (netif_running(adapter->netdev))
+       while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+               msleep(1);
+
+       if (netif_running(netdev))
                ixgbe_down(adapter);
 
        /*
@@ -733,6 +752,7 @@ err_setup:
        if (netif_running(adapter->netdev))
                ixgbe_up(adapter);
 
+       clear_bit(__IXGBE_RESETTING, &adapter->state);
        return err;
 }
 
@@ -820,11 +840,8 @@ static int ixgbe_nway_reset(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-       if (netif_running(netdev)) {
-               ixgbe_down(adapter);
-               ixgbe_reset(adapter);
-               ixgbe_up(adapter);
-       }
+       if (netif_running(netdev))
+               ixgbe_reinit_locked(adapter);
 
        return 0;
 }
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 3732dd6..ead49e5 100644
@@ -87,6 +87,25 @@ MODULE_VERSION(DRV_VERSION);
 
 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
 
+static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
+{
+       u32 ctrl_ext;
+
+       /* Let firmware take over control of h/w */
+       ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+                       ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+}
+
+static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
+{
+       u32 ctrl_ext;
+
+       /* Let firmware know the driver has taken over */
+       ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
+                       ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
+}
 
 #ifdef DEBUG
 /**
@@ -165,6 +184,15 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
        return false;
 }
 
+#define IXGBE_MAX_TXD_PWR      14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+                        (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+       MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)   /* for context */
+
 /**
  * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
  * @adapter: board private structure
@@ -177,18 +205,34 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int i, eop;
        bool cleaned = false;
-       int count = 0;
+       unsigned int total_tx_bytes = 0, total_tx_packets = 0;
 
        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
        while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
-               for (cleaned = false; !cleaned;) {
+               cleaned = false;
+               while (!cleaned) {
                        tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        cleaned = (i == eop);
 
                        tx_ring->stats.bytes += tx_buffer_info->length;
+                       if (cleaned) {
+                               struct sk_buff *skb = tx_buffer_info->skb;
+#ifdef NETIF_F_TSO
+                               unsigned int segs, bytecount;
+                               segs = skb_shinfo(skb)->gso_segs ?: 1;
+                               /* multiply data chunks by size of headers */
+                               bytecount = ((segs - 1) * skb_headlen(skb)) +
+                                           skb->len;
+                               total_tx_packets += segs;
+                               total_tx_bytes += bytecount;
+#else
+                               total_tx_packets++;
+                               total_tx_bytes += skb->len;
+#endif
+                       }
                        ixgbe_unmap_and_free_tx_resource(adapter,
                                                         tx_buffer_info);
                        tx_desc->wb.status = 0;
@@ -204,29 +248,36 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
                eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
 
                /* weight of a sort for tx, avoid endless transmit cleanup */
-               if (count++ >= tx_ring->work_limit)
+               if (total_tx_packets >= tx_ring->work_limit)
                        break;
        }
 
        tx_ring->next_to_clean = i;
 
-#define TX_WAKE_THRESHOLD 32
-       spin_lock(&tx_ring->tx_lock);
-
-       if (cleaned && netif_carrier_ok(netdev) &&
-           (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) &&
-           !test_bit(__IXGBE_DOWN, &adapter->state))
-               netif_wake_queue(netdev);
-
-       spin_unlock(&tx_ring->tx_lock);
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+       if (total_tx_packets && netif_carrier_ok(netdev) &&
+           (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+               /* Make sure that anybody stopping the queue after this
+                * sees the new next_to_clean.
+                */
+               smp_mb();
+               if (netif_queue_stopped(netdev) &&
+                   !test_bit(__IXGBE_DOWN, &adapter->state)) {
+                       netif_wake_queue(netdev);
+                       adapter->restart_queue++;
+               }
+       }
 
        if (adapter->detect_tx_hung)
                if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
                        netif_stop_queue(netdev);
 
-       if (count >= tx_ring->work_limit)
+       if (total_tx_packets >= tx_ring->work_limit)
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
 
+       adapter->net_stats.tx_bytes += total_tx_bytes;
+       adapter->net_stats.tx_packets += total_tx_packets;
+       cleaned = total_tx_packets ? true : false;
        return cleaned;
 }
 
@@ -255,25 +306,40 @@ static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
        }
 }
 
+/**
+ * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @status_err: hardware indication of status of receive
+ * @skb: skb currently being received and modified
+ **/
 static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                         u32 status_err,
                                         struct sk_buff *skb)
 {
        skb->ip_summed = CHECKSUM_NONE;
 
-       /* Ignore Checksum bit is set */
+       /* Ignore Checksum bit is set, or rx csum disabled */
        if ((status_err & IXGBE_RXD_STAT_IXSM) ||
-                    !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+           !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
                return;
-       /* TCP/UDP checksum error bit is set */
-       if (status_err & (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE)) {
-               /* let the stack verify checksum errors */
+
+       /* if IP and error */
+       if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+           (status_err & IXGBE_RXDADV_ERR_IPE)) {
                adapter->hw_csum_rx_error++;
                return;
        }
+
+       if (!(status_err & IXGBE_RXD_STAT_L4CS))
+               return;
+
+       if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+               adapter->hw_csum_rx_error++;
+               return;
+       }
+
        /* It must be a TCP or UDP packet with a valid checksum */
-       if (status_err & (IXGBE_RXD_STAT_L4CS | IXGBE_RXD_STAT_UDPCS))
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
 }
 
@@ -379,6 +445,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
        u16 hdr_info, vlan_tag;
        bool is_vlan, cleaned = false;
        int cleaned_count = 0;
+       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
        i = rx_ring->next_to_clean;
        upper_len = 0;
@@ -458,6 +525,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                }
 
                ixgbe_rx_checksum(adapter, staterr, skb);
+
+               /* probably a little skewed due to removing CRC */
+               total_rx_bytes += skb->len;
+               total_rx_packets++;
+
                skb->protocol = eth_type_trans(skb, netdev);
                ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag);
                netdev->last_rx = jiffies;
@@ -486,6 +558,9 @@ next_desc:
        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
 
+       adapter->net_stats.rx_bytes += total_rx_bytes;
+       adapter->net_stats.rx_packets += total_rx_packets;
+
        return cleaned;
 }
 
@@ -535,7 +610,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        mod_timer(&adapter->watchdog_timer, jiffies);
        }
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
+
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 
        return IRQ_HANDLED;
 }
@@ -713,7 +790,6 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
                /* Disable interrupts and register for poll. The flush of the
                 * posted write is intentionally left out. */
-               atomic_inc(&adapter->irq_sem);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
                __netif_rx_schedule(netdev, &adapter->napi);
        }
@@ -801,7 +877,6 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 {
-       atomic_inc(&adapter->irq_sem);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
        IXGBE_WRITE_FLUSH(&adapter->hw);
        synchronize_irq(adapter->pdev->irq);
@@ -813,15 +888,13 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
 {
-       if (atomic_dec_and_test(&adapter->irq_sem)) {
-               if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
-                                       (IXGBE_EIMS_ENABLE_MASK &
-                                        ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
-                               IXGBE_EIMS_ENABLE_MASK);
-               IXGBE_WRITE_FLUSH(&adapter->hw);
-       }
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
+                               (IXGBE_EIMS_ENABLE_MASK &
+                                ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
+                       IXGBE_EIMS_ENABLE_MASK);
+       IXGBE_WRITE_FLUSH(&adapter->hw);
 }
 
 /**
@@ -1040,7 +1113,8 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        u32 ctrl;
 
-       ixgbe_irq_disable(adapter);
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_disable(adapter);
        adapter->vlgrp = grp;
 
        if (grp) {
@@ -1051,7 +1125,8 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
        }
 
-       ixgbe_irq_enable(adapter);
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_enable(adapter);
 }
 
 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -1066,9 +1141,13 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-       ixgbe_irq_disable(adapter);
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_disable(adapter);
+
        vlan_group_set_device(adapter->vlgrp, vid, NULL);
-       ixgbe_irq_enable(adapter);
+
+       if (!test_bit(__IXGBE_DOWN, &adapter->state))
+               ixgbe_irq_enable(adapter);
 
        /* remove VID from filter table */
        ixgbe_set_vfta(&adapter->hw, vid, 0, false);
@@ -1170,6 +1249,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        u32 txdctl, rxdctl, mhadd;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
+       ixgbe_get_hw_control(adapter);
+
        if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED |
                              IXGBE_FLAG_MSI_ENABLED)) {
                if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1224,6 +1305,16 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        return 0;
 }
 
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
+{
+       WARN_ON(in_interrupt());
+       while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+               msleep(1);
+       ixgbe_down(adapter);
+       ixgbe_up(adapter);
+       clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+
 int ixgbe_up(struct ixgbe_adapter *adapter)
 {
        /* hardware has been reset, we need to reload some things */
@@ -1408,7 +1499,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        msleep(10);
 
        napi_disable(&adapter->napi);
-       atomic_set(&adapter->irq_sem, 0);
 
        ixgbe_irq_disable(adapter);
 
@@ -1447,6 +1537,8 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);
 
+       ixgbe_release_hw_control(adapter);
+
        pci_disable_device(pdev);
 
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -1481,7 +1573,8 @@ static int ixgbe_clean(struct napi_struct *napi, int budget)
        /* If budget not fully consumed, exit the polling mode */
        if (work_done < budget) {
                netif_rx_complete(netdev, napi);
-               ixgbe_irq_enable(adapter);
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       ixgbe_irq_enable(adapter);
        }
 
        return work_done;
@@ -1506,8 +1599,7 @@ static void ixgbe_reset_task(struct work_struct *work)
 
        adapter->tx_timeout_count++;
 
-       ixgbe_down(adapter);
-       ixgbe_up(adapter);
+       ixgbe_reinit_locked(adapter);
 }
 
 /**
@@ -1590,7 +1682,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
                return -ENOMEM;
        }
 
-       atomic_set(&adapter->irq_sem, 1);
        set_bit(__IXGBE_DOWN, &adapter->state);
 
        return 0;
@@ -1634,7 +1725,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
        txdr->next_to_use = 0;
        txdr->next_to_clean = 0;
        txdr->work_limit = txdr->count;
-       spin_lock_init(&txdr->tx_lock);
 
        return 0;
 }
@@ -1828,10 +1918,8 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 
        netdev->mtu = new_mtu;
 
-       if (netif_running(netdev)) {
-               ixgbe_down(adapter);
-               ixgbe_up(adapter);
-       }
+       if (netif_running(netdev))
+               ixgbe_reinit_locked(adapter);
 
        return 0;
 }
@@ -1852,14 +1940,8 @@ static int ixgbe_open(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        int err;
-       u32 ctrl_ext;
        u32 num_rx_queues = adapter->num_rx_queues;
 
-       /* Let firmware know the driver has taken over */
-       ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-                       ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
-
 try_intr_reinit:
        /* allocate transmit descriptors */
        err = ixgbe_setup_all_tx_resources(adapter);
@@ -1910,6 +1992,7 @@ try_intr_reinit:
        return 0;
 
 err_up:
+       ixgbe_release_hw_control(adapter);
        ixgbe_free_irq(adapter);
 err_req_irq:
        ixgbe_free_all_rx_resources(adapter);
@@ -1935,7 +2018,6 @@ err_setup_tx:
 static int ixgbe_close(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       u32 ctrl_ext;
 
        ixgbe_down(adapter);
        ixgbe_free_irq(adapter);
@@ -1943,9 +2025,7 @@ static int ixgbe_close(struct net_device *netdev)
        ixgbe_free_all_tx_resources(adapter);
        ixgbe_free_all_rx_resources(adapter);
 
-       ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
-                       ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
+       ixgbe_release_hw_control(adapter);
 
        return 0;
 }
@@ -1957,22 +2037,26 @@ static int ixgbe_close(struct net_device *netdev)
 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       u64 good_rx, missed_rx, bprc;
+       u64 total_mpc = 0;
+       u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
 
        adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
-       good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC);
-       missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0));
-       missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1));
-       missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2));
-       missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3));
-       missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4));
-       missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5));
-       missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6));
-       missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7));
-       adapter->stats.gprc += (good_rx - missed_rx);
-
-       adapter->stats.mpc[0] += missed_rx;
+       for (i = 0; i < 8; i++) {
+               /* for packet buffers not used, the register should read 0 */
+               mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+               missed_rx += mpc;
+               adapter->stats.mpc[i] += mpc;
+               total_mpc += adapter->stats.mpc[i];
+               adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+       }
+       adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+       /* work around hardware counting issue */
+       adapter->stats.gprc -= missed_rx;
+
+       /* 82598 hardware only has a 32 bit counter in the high register */
        adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+       adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+       adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
        bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
        adapter->stats.bprc += bprc;
        adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
@@ -1984,35 +2068,37 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
        adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
        adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
-
        adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
        adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
-       adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC);
        adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-       adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+       lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+       adapter->stats.lxontxc += lxon;
+       lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+       adapter->stats.lxofftxc += lxoff;
        adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
        adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
-       adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
-       adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0));
+       adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+       /*
+        * 82598 errata - tx of flow control packets is included in tx counters
+        */
+       xon_off_tot = lxon + lxoff;
+       adapter->stats.gptc -= xon_off_tot;
+       adapter->stats.mptc -= xon_off_tot;
+       adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
        adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
        adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
        adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
-       adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
        adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
        adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+       adapter->stats.ptc64 -= xon_off_tot;
        adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
        adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
        adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
        adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
        adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
-       adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
        adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
 
        /* Fill out the OS statistics structure */
-       adapter->net_stats.rx_packets = adapter->stats.gprc;
-       adapter->net_stats.tx_packets = adapter->stats.gptc;
-       adapter->net_stats.rx_bytes = adapter->stats.gorc;
-       adapter->net_stats.tx_bytes = adapter->stats.gotc;
        adapter->net_stats.multicast = adapter->stats.mprc;
 
        /* Rx Errors */
@@ -2021,8 +2107,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        adapter->net_stats.rx_dropped = 0;
        adapter->net_stats.rx_length_errors = adapter->stats.rlec;
        adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
-       adapter->net_stats.rx_missed_errors = adapter->stats.mpc[0];
-
+       adapter->net_stats.rx_missed_errors = total_mpc;
 }
 
 /**
@@ -2076,15 +2161,6 @@ static void ixgbe_watchdog(unsigned long data)
                          round_jiffies(jiffies + 2 * HZ));
 }
 
-#define IXGBE_MAX_TXD_PWR      14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
-                        (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
-       MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)   /* for context */
-
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
                         struct ixgbe_ring *tx_ring, struct sk_buff *skb,
                         u32 tx_flags, u8 *hdr_len)
@@ -2356,6 +2432,37 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
        writel(i, adapter->hw.hw_addr + tx_ring->tail);
 }
 
+static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
+                                struct ixgbe_ring *tx_ring, int size)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+       netif_stop_queue(netdev);
+       /* Herbert's original patch had:
+        *  smp_mb__after_netif_stop_queue();
+        * but since that doesn't exist yet, just open code it. */
+       smp_mb();
+
+       /* We need to check again in a case another CPU has just
+        * made room available. */
+       if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+               return -EBUSY;
+
+       /* A reprieve! - use start_queue because it doesn't call schedule */
+       netif_wake_queue(netdev);
+       ++adapter->restart_queue;
+       return 0;
+}
+
+static int ixgbe_maybe_stop_tx(struct net_device *netdev,
+                              struct ixgbe_ring *tx_ring, int size)
+{
+       if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+               return 0;
+       return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+}
+
+
 static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2363,7 +2470,6 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        unsigned int len = skb->len;
        unsigned int first;
        unsigned int tx_flags = 0;
-       unsigned long flags = 0;
        u8 hdr_len;
        int tso;
        unsigned int mss = 0;
@@ -2389,14 +2495,10 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        for (f = 0; f < nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
-       spin_lock_irqsave(&tx_ring->tx_lock, flags);
-       if (IXGBE_DESC_UNUSED(tx_ring) < (count + 2)) {
+       if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
                adapter->tx_busy++;
-               netif_stop_queue(netdev);
-               spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }
-       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
        if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
                tx_flags |= IXGBE_TX_FLAGS_VLAN;
                tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
@@ -2423,11 +2525,7 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
        netdev->trans_start = jiffies;
 
-       spin_lock_irqsave(&tx_ring->tx_lock, flags);
-       /* Make sure there is space in the ring for the next send. */
-       if (IXGBE_DESC_UNUSED(tx_ring) < DESC_NEEDED)
-               netif_stop_queue(netdev);
-       spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+       ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
        return NETDEV_TX_OK;
 }
@@ -2697,6 +2795,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        return 0;
 
 err_register:
+       ixgbe_release_hw_control(adapter);
 err_hw_init:
 err_sw_init:
 err_eeprom:
@@ -2732,6 +2831,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 
        unregister_netdev(netdev);
 
+       ixgbe_release_hw_control(adapter);
+
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
 
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 651c269..b528ce7 100644
@@ -1652,6 +1652,11 @@ static void eth_tx_fill_frag_descs(struct mv643xx_private *mp,
        }
 }
 
+static inline __be16 sum16_as_be(__sum16 sum)
+{
+       return (__force __be16)sum;
+}
+
 /**
  * eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw
  *
@@ -1689,7 +1694,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
        desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               BUG_ON(skb->protocol != ETH_P_IP);
+               BUG_ON(skb->protocol != htons(ETH_P_IP));
 
                cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
                           ETH_GEN_IP_V_4_CHECKSUM  |
@@ -1698,10 +1703,10 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_UDP:
                        cmd_sts |= ETH_UDP_FRAME;
-                       desc->l4i_chk = udp_hdr(skb)->check;
+                       desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
                        break;
                case IPPROTO_TCP:
-                       desc->l4i_chk = tcp_hdr(skb)->check;
+                       desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
                        break;
                default:
                        BUG();
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 1b51bb6..5aa0a80 100644
@@ -2468,9 +2468,10 @@ static int __init pppol2tp_init(void)
 
 out:
        return err;
-
+#ifdef CONFIG_PROC_FS
 out_unregister_pppox_proto:
        unregister_pppox_proto(PX_PROTO_OL2TP);
+#endif
 out_unregister_pppol2tp_proto:
        proto_unregister(&pppol2tp_sk_proto);
        goto out;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index dc06236..9a62959 100644
@@ -857,7 +857,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
        sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
 
        /* On chips without ram buffer, pause is controled by MAC level */
-       if (sky2_read8(hw, B2_E_0) == 0) {
+       if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
                sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
                sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
 
@@ -1194,7 +1194,7 @@ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
        struct sk_buff *skb;
        int i;
 
-       if (sky2->hw->flags & SKY2_HW_FIFO_HANG_CHECK) {
+       if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
                unsigned char *start;
                /*
                 * Workaround for a bug in FIFO that cause hang
@@ -1387,6 +1387,7 @@ static int sky2_up(struct net_device *dev)
        if (ramsize > 0) {
                u32 rxspace;
 
+               hw->flags |= SKY2_HW_RAM_BUFFER;
                pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
                if (ramsize < 16)
                        rxspace = ramsize / 2;
@@ -2026,7 +2027,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
 
        synchronize_irq(hw->pdev->irq);
 
-       if (sky2_read8(hw, B2_E_0) == 0)
+       if (!(hw->flags & SKY2_HW_RAM_BUFFER))
                sky2_set_tx_stfwd(hw, port);
 
        ctl = gma_read16(hw, port, GM_GP_CTRL);
@@ -2566,7 +2567,7 @@ static void sky2_watchdog(unsigned long arg)
                        ++active;
 
                        /* For chips with Rx FIFO, check if stuck */
-                       if ((hw->flags & SKY2_HW_FIFO_HANG_CHECK) &&
+                       if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
                             sky2_rx_hung(dev)) {
                                pr_info(PFX "%s: receiver hang detected\n",
                                        dev->name);
@@ -2722,11 +2723,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
 
        switch(hw->chip_id) {
        case CHIP_ID_YUKON_XL:
-               hw->flags = SKY2_HW_GIGABIT
-                       | SKY2_HW_NEWER_PHY;
-               if (hw->chip_rev < 3)
-                       hw->flags |= SKY2_HW_FIFO_HANG_CHECK;
-
+               hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
                break;
 
        case CHIP_ID_YUKON_EC_U:
@@ -2752,7 +2749,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
                        dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
                        return -EOPNOTSUPP;
                }
-               hw->flags = SKY2_HW_GIGABIT | SKY2_HW_FIFO_HANG_CHECK;
+               hw->flags = SKY2_HW_GIGABIT;
                break;
 
        case CHIP_ID_YUKON_FE:
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 2bced1a..5ab5c1c 100644
@@ -2045,7 +2045,7 @@ struct sky2_hw {
 #define SKY2_HW_FIBRE_PHY      0x00000002
 #define SKY2_HW_GIGABIT                0x00000004
 #define SKY2_HW_NEWER_PHY      0x00000008
-#define SKY2_HW_FIFO_HANG_CHECK        0x00000010
+#define SKY2_HW_RAM_BUFFER     0x00000010
 #define SKY2_HW_NEW_LE         0x00000020      /* new LSOv2 format */
 #define SKY2_HW_AUTO_TX_SUM    0x00000040      /* new IP decode for Tx */
 #define SKY2_HW_ADV_POWER_CTL  0x00000080      /* additional PHY power regs */
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index c99ce74..3af5b92 100644
@@ -465,7 +465,7 @@ static struct pci_driver tlan_driver = {
 
 static int __init tlan_probe(void)
 {
-       static int      pad_allocated;
+       int rc = -ENODEV;
 
        printk(KERN_INFO "%s", tlan_banner);
 
@@ -473,17 +473,22 @@ static int __init tlan_probe(void)
 
        if (TLanPadBuffer == NULL) {
                printk(KERN_ERR "TLAN: Could not allocate memory for pad buffer.\n");
-               return -ENOMEM;
+               rc = -ENOMEM;
+               goto err_out;
        }
 
        memset(TLanPadBuffer, 0, TLAN_MIN_FRAME_SIZE);
-       pad_allocated = 1;
 
        TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
 
        /* Use new style PCI probing. Now the kernel will
           do most of this for us */
-       pci_register_driver(&tlan_driver);
+       rc = pci_register_driver(&tlan_driver);
+
+       if (rc != 0) {
+               printk(KERN_ERR "TLAN: Could not register pci driver.\n");
+               goto err_out_pci_free;
+       }
 
        TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
        TLan_EisaProbe();
@@ -493,11 +498,17 @@ static int __init tlan_probe(void)
                 tlan_have_pci, tlan_have_eisa);
 
        if (TLanDevicesInstalled == 0) {
-               pci_unregister_driver(&tlan_driver);
-               pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
-               return -ENODEV;
+               rc = -ENODEV;
+               goto  err_out_pci_unreg;
        }
        return 0;
+
+err_out_pci_unreg:
+       pci_unregister_driver(&tlan_driver);
+err_out_pci_free:
+       pci_free_consistent(NULL, TLAN_MIN_FRAME_SIZE, TLanPadBuffer, TLanPadBufferDMA);
+err_out:
+       return rc;
 }
 
 
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 8fc7274..6b93d01 100644
@@ -441,7 +441,7 @@ static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
        spin_unlock_irqrestore(&card->lock,flags);
        trigger_transmit(card);
 
-       return -EIO;
+       return NETDEV_TX_BUSY;
 }
 
 
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index e3ba14a..c69e654 100644
@@ -109,7 +109,7 @@ int uec_mdio_reset(struct mii_bus *bus)
        struct ucc_mii_mng __iomem *regs = (void __iomem *)bus->priv;
        unsigned int timeout = PHY_INIT_TIMEOUT;
 
-       spin_lock_bh(&bus->mdio_lock);
+       mutex_lock(&bus->mdio_lock);
 
        /* Reset the management interface */
        out_be32(&regs->miimcfg, MIIMCFG_RESET_MANAGEMENT);
@@ -121,7 +121,7 @@ int uec_mdio_reset(struct mii_bus *bus)
        while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--)
                cpu_relax();
 
-       spin_unlock_bh(&bus->mdio_lock);
+       mutex_unlock(&bus->mdio_lock);
 
        if (timeout <= 0) {
                printk(KERN_ERR "%s: The MII Bus is stuck!\n", bus->name);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e66de0c..fdc2367 100644
@@ -302,10 +302,12 @@ static int virtnet_open(struct net_device *dev)
 
        /* If all buffers were filled by other side before we napi_enabled, we
         * won't get another interrupt, so process any outstanding packets
-        * now.  virtnet_poll wants re-enable the queue, so we disable here. */
-       vi->rvq->vq_ops->disable_cb(vi->rvq);
-       netif_rx_schedule(vi->dev, &vi->napi);
-
+        * now.  virtnet_poll wants re-enable the queue, so we disable here.
+        * We synchronize against interrupts via NAPI_STATE_SCHED */
+       if (netif_rx_schedule_prep(dev, &vi->napi)) {
+               vi->rvq->vq_ops->disable_cb(vi->rvq);
+               __netif_rx_schedule(dev, &vi->napi);
+       }
        return 0;
 }
 
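
[Illustrative sketch, not part of the patch.] The virtio_net fix above relies on netif_rx_schedule_prep() atomically claiming the NAPI scheduled bit, so only one context, the opener or the interrupt handler, ever disables further callbacks and queues the poll. Roughly, with a hypothetical my_disable_cb() standing in for vi->rvq->vq_ops->disable_cb():

#include <linux/netdevice.h>

static void my_disable_cb(struct net_device *dev)
{
	/* hypothetical: tell the device to stop raising rx notifications */
}

static void my_kick_napi(struct net_device *dev, struct napi_struct *napi)
{
	/* Only the winner of the NAPI_STATE_SCHED bit may disable the
	 * device's notifications and schedule polling; the loser knows a
	 * poll is already pending and must leave everything alone. */
	if (netif_rx_schedule_prep(dev, napi)) {
		my_disable_cb(dev);
		__netif_rx_schedule(dev, napi);
	}
}
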
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index d553e6f..39951d0 100644
@@ -1,7 +1,7 @@
 /*
  * Generic HDLC support routines for Linux
  *
- * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
+ * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License
@@ -39,7 +39,7 @@
 #include <net/net_namespace.h>
 
 
-static const char* version = "HDLC support module revision 1.21";
+static const char* version = "HDLC support module revision 1.22";
 
 #undef DEBUG_LINK
 
@@ -66,19 +66,15 @@ static struct net_device_stats *hdlc_get_stats(struct net_device *dev)
 static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
                    struct packet_type *p, struct net_device *orig_dev)
 {
-       struct hdlc_device_desc *desc = dev_to_desc(dev);
+       struct hdlc_device *hdlc = dev_to_hdlc(dev);
 
        if (dev->nd_net != &init_net) {
                kfree_skb(skb);
                return 0;
        }
 
-       if (desc->netif_rx)
-               return desc->netif_rx(skb);
-
-       desc->stats.rx_dropped++; /* Shouldn't happen */
-       dev_kfree_skb(skb);
-       return NET_RX_DROP;
+       BUG_ON(!hdlc->proto->netif_rx);
+       return hdlc->proto->netif_rx(skb);
 }
 
 
@@ -87,7 +83,7 @@ static inline void hdlc_proto_start(struct net_device *dev)
 {
        hdlc_device *hdlc = dev_to_hdlc(dev);
        if (hdlc->proto->start)
-               return hdlc->proto->start(dev);
+               hdlc->proto->start(dev);
 }
 
 
@@ -96,7 +92,7 @@ static inline void hdlc_proto_stop(struct net_device *dev)
 {
        hdlc_device *hdlc = dev_to_hdlc(dev);
        if (hdlc->proto->stop)
-               return hdlc->proto->stop(dev);
+               hdlc->proto->stop(dev);
 }
 
 
@@ -263,8 +259,7 @@ static void hdlc_setup(struct net_device *dev)
 struct net_device *alloc_hdlcdev(void *priv)
 {
        struct net_device *dev;
-       dev = alloc_netdev(sizeof(struct hdlc_device_desc) +
-                          sizeof(hdlc_device), "hdlc%d", hdlc_setup);
+       dev = alloc_netdev(sizeof(struct hdlc_device), "hdlc%d", hdlc_setup);
        if (dev)
                dev_to_hdlc(dev)->priv = priv;
        return dev;
@@ -281,7 +276,7 @@ void unregister_hdlc_device(struct net_device *dev)
 
 
 int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
-                        int (*rx)(struct sk_buff *skb), size_t size)
+                        size_t size)
 {
        detach_hdlc_protocol(dev);
 
@@ -297,7 +292,6 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
                        return -ENOBUFS;
                }
        dev_to_hdlc(dev)->proto = proto;
-       dev_to_desc(dev)->netif_rx = rx;
        return 0;
 }
 
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 038a6e7..7133c68 100644
@@ -250,7 +250,7 @@ static int cisco_rx(struct sk_buff *skb)
        return NET_RX_DROP;
 
  rx_error:
-       dev_to_desc(dev)->stats.rx_errors++; /* Mark error */
+       dev_to_hdlc(dev)->stats.rx_errors++; /* Mark error */
        dev_kfree_skb_any(skb);
        return NET_RX_DROP;
 }
@@ -314,6 +314,7 @@ static struct hdlc_proto proto = {
        .stop           = cisco_stop,
        .type_trans     = cisco_type_trans,
        .ioctl          = cisco_ioctl,
+       .netif_rx       = cisco_rx,
        .module         = THIS_MODULE,
 };
 
@@ -360,7 +361,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
                if (result)
                        return result;
 
-               result = attach_hdlc_protocol(dev, &proto, cisco_rx,
+               result = attach_hdlc_protocol(dev, &proto,
                                              sizeof(struct cisco_state));
                if (result)
                        return result;
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 071a64c..c4ab032 100644
@@ -42,7 +42,6 @@
 #include <linux/init.h>
 #include <linux/skbuff.h>
 #include <linux/pkt_sched.h>
-#include <linux/random.h>
 #include <linux/inetdevice.h>
 #include <linux/lapb.h>
 #include <linux/rtnetlink.h>
@@ -136,6 +135,10 @@ typedef struct pvc_device_struct {
        }state;
 }pvc_device;
 
+struct pvc_desc {
+       struct net_device_stats stats;
+       pvc_device *pvc;
+};
 
 struct frad_state {
        fr_proto settings;
@@ -171,17 +174,20 @@ static inline void dlci_to_q922(u8 *hdr, u16 dlci)
 }
 
 
-static inline struct frad_state * state(hdlc_device *hdlc)
+static inline struct frad_state* state(hdlc_device *hdlc)
 {
        return(struct frad_state *)(hdlc->state);
 }
 
-
-static __inline__ pvc_device* dev_to_pvc(struct net_device *dev)
+static inline struct pvc_desc* pvcdev_to_desc(struct net_device *dev)
 {
        return dev->priv;
 }
 
+static inline struct net_device_stats* pvc_get_stats(struct net_device *dev)
+{
+       return &pvcdev_to_desc(dev)->stats;
+}
 
 static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
 {
@@ -351,7 +357,7 @@ static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
 
 static int pvc_open(struct net_device *dev)
 {
-       pvc_device *pvc = dev_to_pvc(dev);
+       pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
 
        if ((pvc->frad->flags & IFF_UP) == 0)
                return -EIO;  /* Frad must be UP in order to activate PVC */
@@ -371,7 +377,7 @@ static int pvc_open(struct net_device *dev)
 
 static int pvc_close(struct net_device *dev)
 {
-       pvc_device *pvc = dev_to_pvc(dev);
+       pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
 
        if (--pvc->open_count == 0) {
                hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
@@ -390,7 +396,7 @@ static int pvc_close(struct net_device *dev)
 
 static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-       pvc_device *pvc = dev_to_pvc(dev);
+       pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
        fr_proto_pvc_info info;
 
        if (ifr->ifr_settings.type == IF_GET_PROTO) {
@@ -416,17 +422,9 @@ static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        return -EINVAL;
 }
 
-
-static inline struct net_device_stats *pvc_get_stats(struct net_device *dev)
-{
-       return &dev_to_desc(dev)->stats;
-}
-
-
-
 static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       pvc_device *pvc = dev_to_pvc(dev);
+       pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
        struct net_device_stats *stats = pvc_get_stats(dev);
 
        if (pvc->state.active) {
@@ -957,7 +955,7 @@ static int fr_rx(struct sk_buff *skb)
 
 
        if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
-               dev_to_desc(frad)->stats.rx_dropped++;
+               dev_to_hdlc(frad)->stats.rx_dropped++;
                return NET_RX_DROP;
        }
 
@@ -1018,7 +1016,7 @@ static int fr_rx(struct sk_buff *skb)
        }
 
  rx_error:
-       dev_to_desc(frad)->stats.rx_errors++; /* Mark error */
+       dev_to_hdlc(frad)->stats.rx_errors++; /* Mark error */
        dev_kfree_skb_any(skb);
        return NET_RX_DROP;
 }
@@ -1109,11 +1107,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
        used = pvc_is_used(pvc);
 
        if (type == ARPHRD_ETHER)
-               dev = alloc_netdev(sizeof(struct net_device_stats),
-                                  "pvceth%d", ether_setup);
+               dev = alloc_netdev(sizeof(struct pvc_desc), "pvceth%d",
+                                  ether_setup);
        else
-               dev = alloc_netdev(sizeof(struct net_device_stats),
-                                  "pvc%d", pvc_setup);
+               dev = alloc_netdev(sizeof(struct pvc_desc), "pvc%d", pvc_setup);
 
        if (!dev) {
                printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n",
@@ -1122,10 +1119,9 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
                return -ENOBUFS;
        }
 
-       if (type == ARPHRD_ETHER) {
-               memcpy(dev->dev_addr, "\x00\x01", 2);
-                get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
-       } else {
+       if (type == ARPHRD_ETHER)
+               random_ether_addr(dev->dev_addr);
+       else {
                *(__be16*)dev->dev_addr = htons(dlci);
                dlci_to_q922(dev->broadcast, dlci);
        }
@@ -1137,7 +1133,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
        dev->change_mtu = pvc_change_mtu;
        dev->mtu = HDLC_MAX_MTU;
        dev->tx_queue_len = 0;
-       dev->priv = pvc;
+       pvcdev_to_desc(dev)->pvc = pvc;
 
        result = dev_alloc_name(dev, dev->name);
        if (result < 0) {
@@ -1219,6 +1215,7 @@ static struct hdlc_proto proto = {
        .stop           = fr_stop,
        .detach         = fr_destroy,
        .ioctl          = fr_ioctl,
+       .netif_rx       = fr_rx,
        .module         = THIS_MODULE,
 };
 
@@ -1277,7 +1274,7 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
                        return result;
 
                if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
-                       result = attach_hdlc_protocol(dev, &proto, fr_rx,
+                       result = attach_hdlc_protocol(dev, &proto,
                                                      sizeof(struct frad_state));
                        if (result)
                                return result;
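
The frame-relay hunks above move each PVC interface's statistics and a back-pointer into a small descriptor that alloc_netdev() allocates together with the net_device. A minimal sketch of the same layout, using hypothetical my_* names rather than the driver's own (2.6.24-era API, dev->priv still present):

/* Sketch only: private block (stats + back-pointer) sized by alloc_netdev().
 * struct my_pvc, struct my_desc and my_create_pvc_dev() are illustrative names. */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct my_pvc;                          /* shared PVC state, opaque here */

struct my_desc {
        struct net_device_stats stats;  /* per-interface counters */
        struct my_pvc *pvc;             /* back-pointer to the PVC */
};

static inline struct my_desc *my_dev_to_desc(struct net_device *dev)
{
        return dev->priv;               /* alloc_netdev() points this at the block */
}

static struct net_device *my_create_pvc_dev(struct my_pvc *pvc)
{
        struct net_device *dev;

        dev = alloc_netdev(sizeof(struct my_desc), "mypvc%d", ether_setup);
        if (!dev)
                return NULL;
        my_dev_to_desc(dev)->pvc = pvc; /* wire up the back-pointer */
        return dev;
}
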
index 519e155..10396d9 100644 (file)
@@ -122,7 +122,7 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
                if (result)
                        return result;
 
-               result = attach_hdlc_protocol(dev, &proto, NULL,
+               result = attach_hdlc_protocol(dev, &proto,
                                              sizeof(struct ppp_state));
                if (result)
                        return result;
index e23bc66..bbbb819 100644 (file)
@@ -82,7 +82,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
                if (result)
                        return result;
 
-               result = attach_hdlc_protocol(dev, &proto, NULL,
+               result = attach_hdlc_protocol(dev, &proto,
                                              sizeof(raw_hdlc_proto));
                if (result)
                        return result;
index 8895394..d20c685 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/init.h>
 #include <linux/skbuff.h>
 #include <linux/pkt_sched.h>
-#include <linux/random.h>
 #include <linux/inetdevice.h>
 #include <linux/lapb.h>
 #include <linux/rtnetlink.h>
@@ -96,7 +95,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
                if (result)
                        return result;
 
-               result = attach_hdlc_protocol(dev, &proto, NULL,
+               result = attach_hdlc_protocol(dev, &proto,
                                              sizeof(raw_hdlc_proto));
                if (result)
                        return result;
@@ -107,8 +106,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
                ether_setup(dev);
                dev->change_mtu = old_ch_mtu;
                dev->tx_queue_len = old_qlen;
-               memcpy(dev->dev_addr, "\x00\x01", 2);
-                get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
+               random_ether_addr(dev->dev_addr);
                netif_dormant_off(dev);
                return 0;
        }
index cd7b22f..c15cc11 100644 (file)
@@ -164,17 +164,17 @@ static void x25_close(struct net_device *dev)
 
 static int x25_rx(struct sk_buff *skb)
 {
-       struct hdlc_device_desc *desc = dev_to_desc(skb->dev);
+       struct hdlc_device *hdlc = dev_to_hdlc(skb->dev);
 
        if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
-               desc->stats.rx_dropped++;
+               hdlc->stats.rx_dropped++;
                return NET_RX_DROP;
        }
 
        if (lapb_data_received(skb->dev, skb) == LAPB_OK)
                return NET_RX_SUCCESS;
 
-       desc->stats.rx_errors++;
+       hdlc->stats.rx_errors++;
        dev_kfree_skb_any(skb);
        return NET_RX_DROP;
 }
@@ -184,6 +184,7 @@ static struct hdlc_proto proto = {
        .open           = x25_open,
        .close          = x25_close,
        .ioctl          = x25_ioctl,
+       .netif_rx       = x25_rx,
        .module         = THIS_MODULE,
 };
 
@@ -211,8 +212,7 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
                if (result)
                        return result;
 
-               if ((result = attach_hdlc_protocol(dev, &proto,
-                                                  x25_rx, 0)) != 0)
+               if ((result = attach_hdlc_protocol(dev, &proto, 0)))
                        return result;
                dev->hard_start_xmit = x25_xmit;
                dev->type = ARPHRD_X25;
index 8a708b7..3dfb28a 100644 (file)
@@ -337,7 +337,7 @@ static inline int txring_to_priority(struct b43_dmaring *ring)
        return idx_to_prio[index];
 }
 
-u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
+static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
 {
        static const u16 map64[] = {
                B43_MMIO_DMA64_BASE0,
@@ -356,7 +356,7 @@ u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
                B43_MMIO_DMA32_BASE5,
        };
 
-       if (dma64bit) {
+       if (type == B43_DMA_64BIT) {
                B43_WARN_ON(!(controller_idx >= 0 &&
                              controller_idx < ARRAY_SIZE(map64)));
                return map64[controller_idx];
@@ -437,7 +437,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
         * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
         * which accounts for the GFP_DMA flag below.
         */
-       if (ring->dma64)
+       if (ring->type == B43_DMA_64BIT)
                flags |= GFP_DMA;
        ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
                                            &(ring->dmabase), flags);
@@ -459,7 +459,8 @@ static void free_ringmemory(struct b43_dmaring *ring)
 }
 
 /* Reset the RX DMA channel */
-int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
+static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
+                                     enum b43_dmatype type)
 {
        int i;
        u32 value;
@@ -467,12 +468,13 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
 
        might_sleep();
 
-       offset = dma64 ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
+       offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
        b43_write32(dev, mmio_base + offset, 0);
        for (i = 0; i < 10; i++) {
-               offset = dma64 ? B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS;
+               offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
+                                                  B43_DMA32_RXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
-               if (dma64) {
+               if (type == B43_DMA_64BIT) {
                        value &= B43_DMA64_RXSTAT;
                        if (value == B43_DMA64_RXSTAT_DISABLED) {
                                i = -1;
@@ -496,7 +498,8 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
 }
 
 /* Reset the TX DMA channel */
-int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
+static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
+                                     enum b43_dmatype type)
 {
        int i;
        u32 value;
@@ -505,9 +508,10 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
        might_sleep();
 
        for (i = 0; i < 10; i++) {
-               offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
+               offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
+                                                  B43_DMA32_TXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
-               if (dma64) {
+               if (type == B43_DMA_64BIT) {
                        value &= B43_DMA64_TXSTAT;
                        if (value == B43_DMA64_TXSTAT_DISABLED ||
                            value == B43_DMA64_TXSTAT_IDLEWAIT ||
@@ -522,12 +526,13 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
                }
                msleep(1);
        }
-       offset = dma64 ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
+       offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
        b43_write32(dev, mmio_base + offset, 0);
        for (i = 0; i < 10; i++) {
-               offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
+               offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
+                                                  B43_DMA32_TXSTATUS;
                value = b43_read32(dev, mmio_base + offset);
-               if (dma64) {
+               if (type == B43_DMA_64BIT) {
                        value &= B43_DMA64_TXSTAT;
                        if (value == B43_DMA64_TXSTAT_DISABLED) {
                                i = -1;
@@ -552,6 +557,33 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
        return 0;
 }
 
+/* Check if a DMA mapping address is invalid. */
+static bool b43_dma_mapping_error(struct b43_dmaring *ring,
+                                 dma_addr_t addr,
+                                 size_t buffersize)
+{
+       if (unlikely(dma_mapping_error(addr)))
+               return 1;
+
+       switch (ring->type) {
+       case B43_DMA_30BIT:
+               if ((u64)addr + buffersize > (1ULL << 30))
+                       return 1;
+               break;
+       case B43_DMA_32BIT:
+               if ((u64)addr + buffersize > (1ULL << 32))
+                       return 1;
+               break;
+       case B43_DMA_64BIT:
+               /* Currently we can't have addresses beyond
+                * 64bit in the kernel. */
+               break;
+       }
+
+       /* The address is OK. */
+       return 0;
+}
+
 static int setup_rx_descbuffer(struct b43_dmaring *ring,
                               struct b43_dmadesc_generic *desc,
                               struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
@@ -567,7 +599,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
        if (unlikely(!skb))
                return -ENOMEM;
        dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
-       if (dma_mapping_error(dmaaddr)) {
+       if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
                /* ugh. try to realloc in zone_dma */
                gfp_flags |= GFP_DMA;
 
@@ -580,7 +612,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
                                         ring->rx_buffersize, 0);
        }
 
-       if (dma_mapping_error(dmaaddr)) {
+       if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
                dev_kfree_skb_any(skb);
                return -EIO;
        }
@@ -645,7 +677,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
        u32 trans = ssb_dma_translation(ring->dev->dev);
 
        if (ring->tx) {
-               if (ring->dma64) {
+               if (ring->type == B43_DMA_64BIT) {
                        u64 ringbase = (u64) (ring->dmabase);
 
                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@@ -677,7 +709,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
                err = alloc_initial_descbuffers(ring);
                if (err)
                        goto out;
-               if (ring->dma64) {
+               if (ring->type == B43_DMA_64BIT) {
                        u64 ringbase = (u64) (ring->dmabase);
 
                        addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@@ -722,16 +754,16 @@ static void dmacontroller_cleanup(struct b43_dmaring *ring)
 {
        if (ring->tx) {
                b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
-                                          ring->dma64);
-               if (ring->dma64) {
+                                          ring->type);
+               if (ring->type == B43_DMA_64BIT) {
                        b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
                        b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
                } else
                        b43_dma_write(ring, B43_DMA32_TXRING, 0);
        } else {
                b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
-                                          ring->dma64);
-               if (ring->dma64) {
+                                          ring->type);
+               if (ring->type == B43_DMA_64BIT) {
                        b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
                        b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
                } else
@@ -786,7 +818,8 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
 static
 struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                                      int controller_index,
-                                     int for_tx, int dma64)
+                                     int for_tx,
+                                     enum b43_dmatype type)
 {
        struct b43_dmaring *ring;
        int err;
@@ -796,6 +829,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto out;
+       ring->type = type;
 
        nr_slots = B43_RXRING_SLOTS;
        if (for_tx)
@@ -818,7 +852,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                                          b43_txhdr_size(dev),
                                          DMA_TO_DEVICE);
 
-               if (dma_mapping_error(dma_test)) {
+               if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev))) {
                        /* ugh realloc */
                        kfree(ring->txhdr_cache);
                        ring->txhdr_cache = kcalloc(nr_slots,
@@ -832,7 +866,8 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                                                  b43_txhdr_size(dev),
                                                  DMA_TO_DEVICE);
 
-                       if (dma_mapping_error(dma_test))
+                       if (b43_dma_mapping_error(ring, dma_test,
+                                                 b43_txhdr_size(dev)))
                                goto err_kfree_txhdr_cache;
                }
 
@@ -843,10 +878,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 
        ring->dev = dev;
        ring->nr_slots = nr_slots;
-       ring->mmio_base = b43_dmacontroller_base(dma64, controller_index);
+       ring->mmio_base = b43_dmacontroller_base(type, controller_index);
        ring->index = controller_index;
-       ring->dma64 = !!dma64;
-       if (dma64)
+       if (type == B43_DMA_64BIT)
                ring->ops = &dma64_ops;
        else
                ring->ops = &dma32_ops;
@@ -896,8 +930,8 @@ static void b43_destroy_dmaring(struct b43_dmaring *ring)
        if (!ring)
                return;
 
-       b43dbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
-              (ring->dma64) ? "64" : "32",
+       b43dbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots: %d/%d\n",
+              (unsigned int)(ring->type),
               ring->mmio_base,
               (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
        /* Device IRQs are disabled prior entering this function,
@@ -941,12 +975,22 @@ int b43_dma_init(struct b43_wldev *dev)
        struct b43_dmaring *ring;
        int err;
        u64 dmamask;
-       int dma64 = 0;
+       enum b43_dmatype type;
 
        dmamask = supported_dma_mask(dev);
-       if (dmamask == DMA_64BIT_MASK)
-               dma64 = 1;
-
+       switch (dmamask) {
+       default:
+               B43_WARN_ON(1);
+       case DMA_30BIT_MASK:
+               type = B43_DMA_30BIT;
+               break;
+       case DMA_32BIT_MASK:
+               type = B43_DMA_32BIT;
+               break;
+       case DMA_64BIT_MASK:
+               type = B43_DMA_64BIT;
+               break;
+       }
        err = ssb_dma_set_mask(dev->dev, dmamask);
        if (err) {
                b43err(dev->wl, "The machine/kernel does not support "
@@ -958,52 +1002,51 @@ int b43_dma_init(struct b43_wldev *dev)
 
        err = -ENOMEM;
        /* setup TX DMA channels. */
-       ring = b43_setup_dmaring(dev, 0, 1, dma64);
+       ring = b43_setup_dmaring(dev, 0, 1, type);
        if (!ring)
                goto out;
        dma->tx_ring0 = ring;
 
-       ring = b43_setup_dmaring(dev, 1, 1, dma64);
+       ring = b43_setup_dmaring(dev, 1, 1, type);
        if (!ring)
                goto err_destroy_tx0;
        dma->tx_ring1 = ring;
 
-       ring = b43_setup_dmaring(dev, 2, 1, dma64);
+       ring = b43_setup_dmaring(dev, 2, 1, type);
        if (!ring)
                goto err_destroy_tx1;
        dma->tx_ring2 = ring;
 
-       ring = b43_setup_dmaring(dev, 3, 1, dma64);
+       ring = b43_setup_dmaring(dev, 3, 1, type);
        if (!ring)
                goto err_destroy_tx2;
        dma->tx_ring3 = ring;
 
-       ring = b43_setup_dmaring(dev, 4, 1, dma64);
+       ring = b43_setup_dmaring(dev, 4, 1, type);
        if (!ring)
                goto err_destroy_tx3;
        dma->tx_ring4 = ring;
 
-       ring = b43_setup_dmaring(dev, 5, 1, dma64);
+       ring = b43_setup_dmaring(dev, 5, 1, type);
        if (!ring)
                goto err_destroy_tx4;
        dma->tx_ring5 = ring;
 
        /* setup RX DMA channels. */
-       ring = b43_setup_dmaring(dev, 0, 0, dma64);
+       ring = b43_setup_dmaring(dev, 0, 0, type);
        if (!ring)
                goto err_destroy_tx5;
        dma->rx_ring0 = ring;
 
        if (dev->dev->id.revision < 5) {
-               ring = b43_setup_dmaring(dev, 3, 0, dma64);
+               ring = b43_setup_dmaring(dev, 3, 0, type);
                if (!ring)
                        goto err_destroy_rx0;
                dma->rx_ring3 = ring;
        }
 
-       b43dbg(dev->wl, "%d-bit DMA initialized\n",
-              (dmamask == DMA_64BIT_MASK) ? 64 :
-              (dmamask == DMA_32BIT_MASK) ? 32 : 30);
+       b43dbg(dev->wl, "%u-bit DMA initialized\n",
+              (unsigned int)type);
        err = 0;
       out:
        return err;
@@ -1146,7 +1189,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 
        meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
                                           hdrsize, 1);
-       if (dma_mapping_error(meta_hdr->dmaaddr)) {
+       if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize)) {
                ring->current_slot = old_top_slot;
                ring->used_slots = old_used_slots;
                return -EIO;
@@ -1165,7 +1208,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 
        meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
        /* create a bounce buffer in zone_dma on mapping failure. */
-       if (dma_mapping_error(meta->dmaaddr)) {
+       if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
                bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb) {
                        ring->current_slot = old_top_slot;
@@ -1179,7 +1222,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
                skb = bounce_skb;
                meta->skb = skb;
                meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
-               if (dma_mapping_error(meta->dmaaddr)) {
+               if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
                        ring->current_slot = old_top_slot;
                        ring->used_slots = old_used_slots;
                        err = -EIO;
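
b43_dma_mapping_error() above extends the plain dma_mapping_error() test with a range check: a 30- or 32-bit engine must be able to reach the entire buffer. The core of that test, reduced to a standalone sketch with hypothetical names:

/* Illustrative only: an engine that addresses N bits can reach a buffer
 * only if addr + len stays at or below 1 << N. Names are made up here. */
#include <stdbool.h>
#include <stdint.h>

enum my_dma_width { MY_DMA_30BIT = 30, MY_DMA_32BIT = 32, MY_DMA_64BIT = 64 };

static bool my_dma_addr_ok(enum my_dma_width width, uint64_t addr, uint64_t len)
{
        if (width == MY_DMA_64BIT)
                return true;            /* kernel addresses cannot exceed 64 bits */
        return addr + len <= ((uint64_t)1 << width);
}
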
index 58db03a..c0d6b69 100644 (file)
@@ -203,6 +203,12 @@ struct b43_dma_ops {
        void (*set_current_rxslot) (struct b43_dmaring * ring, int slot);
 };
 
+enum b43_dmatype {
+       B43_DMA_30BIT   = 30,
+       B43_DMA_32BIT   = 32,
+       B43_DMA_64BIT   = 64,
+};
+
 struct b43_dmaring {
        /* Lowlevel DMA ops. */
        const struct b43_dma_ops *ops;
@@ -235,8 +241,8 @@ struct b43_dmaring {
        int index;
        /* Boolean. Is this a TX ring? */
        bool tx;
-       /* Boolean. 64bit DMA if true, 32bit DMA otherwise. */
-       bool dma64;
+       /* The type of DMA engine used. */
+       enum b43_dmatype type;
        /* Boolean. Is this ring stopped at ieee80211 level? */
        bool stopped;
        /* Lock, only used for TX. */
@@ -255,8 +261,7 @@ static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset)
        return b43_read32(ring->dev, ring->mmio_base + offset);
 }
 
-static inline
-    void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value)
+static inline void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value)
 {
        b43_write32(ring->dev, ring->mmio_base + offset, value);
 }
@@ -264,13 +269,6 @@ static inline
 int b43_dma_init(struct b43_wldev *dev);
 void b43_dma_free(struct b43_wldev *dev);
 
-int b43_dmacontroller_rx_reset(struct b43_wldev *dev,
-                              u16 dmacontroller_mmio_base, int dma64);
-int b43_dmacontroller_tx_reset(struct b43_wldev *dev,
-                              u16 dmacontroller_mmio_base, int dma64);
-
-u16 b43_dmacontroller_base(int dma64bit, int dmacontroller_idx);
-
 void b43_dma_tx_suspend(struct b43_wldev *dev);
 void b43_dma_tx_resume(struct b43_wldev *dev);
 
index 83161d9..6e08405 100644 (file)
@@ -1164,7 +1164,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 {
        const struct b43legacy_dma_ops *ops = ring->ops;
        u8 *header;
-       int slot;
+       int slot, old_top_slot, old_used_slots;
        int err;
        struct b43legacy_dmadesc_generic *desc;
        struct b43legacy_dmadesc_meta *meta;
@@ -1174,6 +1174,9 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 #define SLOTS_PER_PACKET  2
        B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
 
+       old_top_slot = ring->current_slot;
+       old_used_slots = ring->used_slots;
+
        /* Get a slot for the header. */
        slot = request_slot(ring);
        desc = ops->idx2desc(ring, slot, &meta_hdr);
@@ -1181,9 +1184,14 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 
        header = &(ring->txhdr_cache[slot * sizeof(
                               struct b43legacy_txhdr_fw3)]);
-       b43legacy_generate_txhdr(ring->dev, header,
+       err = b43legacy_generate_txhdr(ring->dev, header,
                                 skb->data, skb->len, ctl,
                                 generate_cookie(ring, slot));
+       if (unlikely(err)) {
+               ring->current_slot = old_top_slot;
+               ring->used_slots = old_used_slots;
+               return err;
+       }
 
        meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
                                       sizeof(struct b43legacy_txhdr_fw3), 1);
@@ -1206,6 +1214,8 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
        if (dma_mapping_error(meta->dmaaddr)) {
                bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb) {
+                       ring->current_slot = old_top_slot;
+                       ring->used_slots = old_used_slots;
                        err = -ENOMEM;
                        goto out_unmap_hdr;
                }
@@ -1216,6 +1226,8 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
                meta->skb = skb;
                meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
                if (dma_mapping_error(meta->dmaaddr)) {
+                       ring->current_slot = old_top_slot;
+                       ring->used_slots = old_used_slots;
                        err = -EIO;
                        goto out_free_bounce;
                }
@@ -1282,6 +1294,13 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
        B43legacy_BUG_ON(ring->stopped);
 
        err = dma_tx_fragment(ring, skb, ctl);
+       if (unlikely(err == -ENOKEY)) {
+               /* Drop this packet, as we don't have the encryption key
+                * anymore and must not transmit it unencrypted. */
+               dev_kfree_skb_any(skb);
+               err = 0;
+               goto out_unlock;
+       }
        if (unlikely(err)) {
                b43legacyerr(dev->wl, "DMA tx mapping failure\n");
                goto out_unlock;
index aa20d5d..53f7f2e 100644 (file)
@@ -3160,8 +3160,6 @@ static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev)
        b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0414, 0x01F4);
 
        ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */
-       memset(wl->bssid, 0, ETH_ALEN);
-       memset(wl->mac_addr, 0, ETH_ALEN);
        b43legacy_upload_card_macaddress(dev);
        b43legacy_security_init(dev);
        b43legacy_rng_init(wl);
@@ -3263,6 +3261,13 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
         * LEDs that are registered later depend on it. */
        b43legacy_rfkill_init(dev);
 
+       /* Kill all old instance specific information to make sure
+        * the card won't use it in the short timeframe between start
+        * and mac80211 reconfiguring it. */
+       memset(wl->bssid, 0, ETH_ALEN);
+       memset(wl->mac_addr, 0, ETH_ALEN);
+       wl->filter_flags = 0;
+
        mutex_lock(&wl->mutex);
 
        if (b43legacy_status(dev) < B43legacy_STAT_INITIALIZED) {
index e4f4c5c..bcdd54e 100644 (file)
@@ -181,7 +181,7 @@ union txhdr_union {
        struct b43legacy_txhdr_fw3 txhdr_fw3;
 };
 
-static void pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
+static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
                                  struct sk_buff *skb,
                                  struct b43legacy_pio_txpacket *packet,
                                  size_t txhdr_size)
@@ -189,14 +189,17 @@ static void pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
        union txhdr_union txhdr_data;
        u8 *txhdr = NULL;
        unsigned int octets;
+       int err;
 
        txhdr = (u8 *)(&txhdr_data.txhdr_fw3);
 
        B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
-       b43legacy_generate_txhdr(queue->dev,
+       err = b43legacy_generate_txhdr(queue->dev,
                                 txhdr, skb->data, skb->len,
                                 &packet->txstat.control,
                                 generate_cookie(queue, packet));
+       if (err)
+               return err;
 
        tx_start(queue);
        octets = skb->len + txhdr_size;
@@ -204,6 +207,8 @@ static void pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
                octets--;
        tx_data(queue, txhdr, (u8 *)skb->data, octets);
        tx_complete(queue, skb);
+
+       return 0;
 }
 
 static void free_txpacket(struct b43legacy_pio_txpacket *packet,
@@ -226,6 +231,7 @@ static int pio_tx_packet(struct b43legacy_pio_txpacket *packet)
        struct b43legacy_pioqueue *queue = packet->queue;
        struct sk_buff *skb = packet->skb;
        u16 octets;
+       int err;
 
        octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3);
        if (queue->tx_devq_size < octets) {
@@ -247,8 +253,14 @@ static int pio_tx_packet(struct b43legacy_pio_txpacket *packet)
        if (queue->tx_devq_used + octets > queue->tx_devq_size)
                return -EBUSY;
        /* Now poke the device. */
-       pio_tx_write_fragment(queue, skb, packet,
+       err = pio_tx_write_fragment(queue, skb, packet,
                              sizeof(struct b43legacy_txhdr_fw3));
+       if (unlikely(err == -ENOKEY)) {
+               /* Drop this packet, as we don't have the encryption key
+                * anymore and must not transmit it unencrypted. */
+               free_txpacket(packet, 1);
+               return 0;
+       }
 
        /* Account for the packet size.
         * (We must not overflow the device TX queue)
@@ -486,6 +498,9 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
        queue = parse_cookie(dev, status->cookie, &packet);
        B43legacy_WARN_ON(!queue);
 
+       if (!packet->skb)
+               return;
+
        queue->tx_devq_packets--;
        queue->tx_devq_used -= (packet->skb->len +
                                sizeof(struct b43legacy_txhdr_fw3));
index e20c552..d84408a 100644 (file)
@@ -181,7 +181,7 @@ static u8 b43legacy_calc_fallback_rate(u8 bitrate)
        return 0;
 }
 
-static void generate_txhdr_fw3(struct b43legacy_wldev *dev,
+static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
                               struct b43legacy_txhdr_fw3 *txhdr,
                               const unsigned char *fragment_data,
                               unsigned int fragment_len,
@@ -252,6 +252,13 @@ static void generate_txhdr_fw3(struct b43legacy_wldev *dev,
                        iv_len = min((size_t)txctl->iv_len,
                                     ARRAY_SIZE(txhdr->iv));
                        memcpy(txhdr->iv, ((u8 *)wlhdr) + wlhdr_len, iv_len);
+               } else {
+                       /* This key is invalid. This might only happen
+                        * in a short timeframe after machine resume before
+                        * we were able to reconfigure keys.
+                        * Drop this packet completely. Do not transmit it
+                        * unencrypted to avoid leaking information. */
+                       return -ENOKEY;
                }
        }
        b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
@@ -345,16 +352,18 @@ static void generate_txhdr_fw3(struct b43legacy_wldev *dev,
        /* Apply the bitfields */
        txhdr->mac_ctl = cpu_to_le32(mac_ctl);
        txhdr->phy_ctl = cpu_to_le16(phy_ctl);
+
+       return 0;
 }
 
-void b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
+int b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
                              u8 *txhdr,
                              const unsigned char *fragment_data,
                              unsigned int fragment_len,
                              const struct ieee80211_tx_control *txctl,
                              u16 cookie)
 {
-       generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr,
+       return generate_txhdr_fw3(dev, (struct b43legacy_txhdr_fw3 *)txhdr,
                           fragment_data, fragment_len,
                           txctl, cookie);
 }
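
The xmit changes above make header generation return -ENOKEY when a key is no longer valid (for example shortly after resume), and the callers drop the frame rather than sending it in the clear. A hedged sketch of that caller-side pattern, with hypothetical my_* names:

/* Sketch only: treat -ENOKEY as "drop silently", never fall back to
 * unencrypted transmission. struct my_dev and my_generate_txhdr() are illustrative. */
#include <linux/netdevice.h>
#include <linux/errno.h>

struct my_dev;
int my_generate_txhdr(struct my_dev *dev, struct sk_buff *skb);

static int my_tx(struct my_dev *dev, struct sk_buff *skb)
{
        int err = my_generate_txhdr(dev, skb);

        if (unlikely(err == -ENOKEY)) {
                dev_kfree_skb_any(skb); /* consume the frame, report success */
                return 0;
        }
        return err;
}
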
index 8a155d0..bab4792 100644 (file)
@@ -76,7 +76,7 @@ struct b43legacy_txhdr_fw3 {
 
 
 
-void b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
+int b43legacy_generate_txhdr(struct b43legacy_wldev *dev,
                              u8 *txhdr,
                              const unsigned char *fragment_data,
                              unsigned int fragment_len,
index f55c757..5ee1ad6 100644 (file)
@@ -4207,13 +4207,13 @@ static u8 ratio2dB[100] = {
  * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
 int iwl3945_calc_db_from_ratio(int sig_ratio)
 {
-       /* Anything above 1000:1 just report as 60 dB */
-       if (sig_ratio > 1000)
+       /* 1000:1 or higher just report as 60 dB */
+       if (sig_ratio >= 1000)
                return 60;
 
-       /* Above 100:1, divide by 10 and use table,
+       /* 100:1 or higher, divide by 10 and use table,
         *   add 20 dB to make up for divide by 10 */
-       if (sig_ratio > 100)
+       if (sig_ratio >= 100)
                return (20 + (int)ratio2dB[sig_ratio/10]);
 
        /* We shouldn't see this */
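
The >= comparisons above matter exactly at the boundaries: ratio2dB[] is declared with 100 entries, so with the old '>' test a ratio of exactly 1000 fell through to the table branch and read ratio2dB[1000 / 10], one element past the end. A small sketch of the corrected thresholding, with a stub table and the lower branches omitted:

/* Sketch only: the real driver fills ratio2dB[] with measured values and
 * also handles ratios below 100:1, which this stub leaves out. */
static const unsigned char my_ratio2db[100];    /* indices 0..99 */

static int my_calc_db_from_ratio(int sig_ratio)
{
        if (sig_ratio >= 1000)                  /* 1000:1 or better caps at 60 dB */
                return 60;
        if (sig_ratio >= 100)                   /* 100:1 .. 999:1 -> index 10..99 */
                return 20 + my_ratio2db[sig_ratio / 10];
        return 0;                               /* lower branches omitted in sketch */
}
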
index db390c5..6115545 100644 (file)
 #include <linux/netdevice.h>
 #include <linux/hdlc/ioctl.h>
 
-
-/* Used by all network devices here, pointed to by netdev_priv(dev) */
-struct hdlc_device_desc {
-       int (*netif_rx)(struct sk_buff *skb);
-       struct net_device_stats stats;
-};
-
 /* This structure is a private property of HDLC protocols.
    Hardware drivers have no interest here */
 
@@ -44,12 +37,15 @@ struct hdlc_proto {
        void (*detach)(struct net_device *dev);
        int (*ioctl)(struct net_device *dev, struct ifreq *ifr);
        __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev);
+       int (*netif_rx)(struct sk_buff *skb);
        struct module *module;
        struct hdlc_proto *next; /* next protocol in the list */
 };
 
 
+/* Pointed to by dev->priv */
 typedef struct hdlc_device {
+       struct net_device_stats stats;
        /* used by HDLC layer to take control over HDLC device from hw driver*/
        int (*attach)(struct net_device *dev,
                      unsigned short encoding, unsigned short parity);
@@ -83,18 +79,11 @@ void unregister_hdlc_protocol(struct hdlc_proto *proto);
 
 struct net_device *alloc_hdlcdev(void *priv);
 
-
-static __inline__ struct hdlc_device_desc* dev_to_desc(struct net_device *dev)
-{
-       return netdev_priv(dev);
-}
-
-static __inline__ hdlc_device* dev_to_hdlc(struct net_device *dev)
+static inline struct hdlc_device* dev_to_hdlc(struct net_device *dev)
 {
-       return netdev_priv(dev) + sizeof(struct hdlc_device_desc);
+       return dev->priv;
 }
 
-
 static __inline__ void debug_frame(const struct sk_buff *skb)
 {
        int i;
@@ -116,13 +105,13 @@ int hdlc_open(struct net_device *dev);
 void hdlc_close(struct net_device *dev);
 
 int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
-                        int (*rx)(struct sk_buff *skb), size_t size);
+                        size_t size);
 /* May be used by hardware driver to gain control over HDLC device */
 void detach_hdlc_protocol(struct net_device *dev);
 
 static __inline__ struct net_device_stats *hdlc_stats(struct net_device *dev)
 {
-       return &dev_to_desc(dev)->stats;
+       return &dev_to_hdlc(dev)->stats;
 }
 
 
index 34f40ef..79504b2 100644 (file)
@@ -327,7 +327,7 @@ static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, unsigned short t
  * 
  * Returns error if the skb is not of VLAN type
  */
-static inline int __vlan_get_tag(struct sk_buff *skb, unsigned short *tag)
+static inline int __vlan_get_tag(const struct sk_buff *skb, unsigned short *tag)
 {
        struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
 
@@ -347,7 +347,8 @@ static inline int __vlan_get_tag(struct sk_buff *skb, unsigned short *tag)
  * 
  * Returns error if @skb->cb[] is not set correctly
  */
-static inline int __vlan_hwaccel_get_tag(struct sk_buff *skb, unsigned short *tag)
+static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
+                                        unsigned short *tag)
 {
        struct vlan_skb_tx_cookie *cookie;
 
@@ -370,7 +371,7 @@ static inline int __vlan_hwaccel_get_tag(struct sk_buff *skb, unsigned short *ta
  * 
  * Returns error if the skb is not VLAN tagged
  */
-static inline int vlan_get_tag(struct sk_buff *skb, unsigned short *tag)
+static inline int vlan_get_tag(const struct sk_buff *skb, unsigned short *tag)
 {
        if (skb->dev->features & NETIF_F_HW_VLAN_TX) {
                return __vlan_hwaccel_get_tag(skb, tag);
index 40fac8c..28dfc61 100644 (file)
@@ -348,6 +348,7 @@ enum
        FLOW_KEY_RTCLASSID,
        FLOW_KEY_SKUID,
        FLOW_KEY_SKGID,
+       FLOW_KEY_VLAN_TAG,
        __FLOW_KEY_MAX,
 };
 
index e18f5c2..9d5da8b 100644 (file)
@@ -373,6 +373,15 @@ void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state)
        if (sdev->bus->bustype == SSB_BUSTYPE_PCI)
                pci_set_power_state(sdev->bus->host_pci, state);
 }
+#else
+static inline void ssb_pcihost_unregister(struct pci_driver *driver)
+{
+}
+
+static inline
+void ssb_pcihost_set_power_state(struct ssb_device *sdev, pci_power_t state)
+{
+}
 #endif /* CONFIG_SSB_PCIHOST */
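
The #else branch added above is the usual stub-inline idiom: when CONFIG_SSB_PCIHOST is disabled, callers still compile because empty static inlines stand in for the real functions. The same pattern in isolation, with a hypothetical config option and names:

/* Illustrative pattern only; CONFIG_MY_FEATURE and my_feature_*() are made up. */
struct my_dev;

#ifdef CONFIG_MY_FEATURE
int my_feature_attach(struct my_dev *dev);
void my_feature_detach(struct my_dev *dev);
#else
static inline int my_feature_attach(struct my_dev *dev)
{
        return 0;                       /* feature compiled out: succeed, do nothing */
}
static inline void my_feature_detach(struct my_dev *dev)
{
}
#endif
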
 
 
index 749fa04..85c680a 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/net.h>
 
 #include <net/ip_vs.h>
 
@@ -169,7 +170,7 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
                                 */
                                if (mark->cw == 0) {
                                        mark->cl = &svc->destinations;
-                                       IP_VS_INFO("ip_vs_wrr_schedule(): "
+                                       IP_VS_ERR_RL("ip_vs_wrr_schedule(): "
                                                   "no available servers\n");
                                        dest = NULL;
                                        goto out;
index e77592d..45c7c0c 100644 (file)
@@ -1,6 +1,5 @@
 config MAC80211
        tristate "Generic IEEE 802.11 Networking Stack (mac80211)"
-       depends on EXPERIMENTAL
        select CRYPTO
        select CRYPTO_ECB
        select CRYPTO_ARC4
index 8d76986..971b867 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <linux/if_vlan.h>
 
 #include <net/pkt_cls.h>
 #include <net/ip.h>
@@ -270,6 +271,15 @@ static u32 flow_get_skgid(const struct sk_buff *skb)
        return 0;
 }
 
+static u32 flow_get_vlan_tag(const struct sk_buff *skb)
+{
+       u16 uninitialized_var(tag);
+
+       if (vlan_get_tag(skb, &tag) < 0)
+               return 0;
+       return tag & VLAN_VID_MASK;
+}
+
 static u32 flow_key_get(const struct sk_buff *skb, int key)
 {
        switch (key) {
@@ -305,6 +315,8 @@ static u32 flow_key_get(const struct sk_buff *skb, int key)
                return flow_get_skuid(skb);
        case FLOW_KEY_SKGID:
                return flow_get_skgid(skb);
+       case FLOW_KEY_VLAN_TAG:
+               return flow_get_vlan_tag(skb);
        default:
                WARN_ON(1);
                return 0;
@@ -402,12 +414,13 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
 
        if (tb[TCA_FLOW_KEYS]) {
                keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);
-               if (fls(keymask) - 1 > FLOW_KEY_MAX)
-                       return -EOPNOTSUPP;
 
                nkeys = hweight32(keymask);
                if (nkeys == 0)
                        return -EINVAL;
+
+               if (fls(keymask) - 1 > FLOW_KEY_MAX)
+                       return -EOPNOTSUPP;
        }
 
        err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
index 9c2ec19..2a7e648 100644 (file)
@@ -176,7 +176,7 @@ META_COLLECTOR(var_dev)
 
 META_COLLECTOR(int_vlan_tag)
 {
-       unsigned short tag;
+       unsigned short uninitialized_var(tag);
        if (vlan_get_tag(skb, &tag) < 0)
                *err = -1;
        else