1 /* $Id: sungem.c,v 1.1.1.1 2005/04/11 02:50:29 jack Exp $
2 * sungem.c: Sun GEM ethernet driver.
4 * Copyright (C) 2000, 2001, 2002 David S. Miller (davem@redhat.com)
6 * Support for Apple GMAC and assorted PHYs by
7 * Benjamin Herrenscmidt (benh@kernel.crashing.org)
10 * - Get rid of all those nasty mdelay's and replace them
11 * with schedule_timeout.
15 #include <linux/config.h>
17 #include <linux/module.h>
19 #include <linux/kernel.h>
20 #include <linux/sched.h>
21 #include <linux/types.h>
22 #include <linux/fcntl.h>
23 #include <linux/interrupt.h>
24 #include <linux/ptrace.h>
25 #include <linux/ioport.h>
27 #include <linux/slab.h>
28 #include <linux/string.h>
29 #include <linux/delay.h>
30 #include <linux/init.h>
31 #include <linux/errno.h>
32 #include <linux/pci.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/mii.h>
37 #include <linux/ethtool.h>
38 #include <linux/crc32.h>
39 #include <linux/random.h>
41 #include <asm/system.h>
42 #include <asm/bitops.h>
44 #include <asm/byteorder.h>
45 #include <asm/uaccess.h>
49 #include <asm/idprom.h>
50 #include <asm/openprom.h>
51 #include <asm/oplib.h>
56 #include <asm/pci-bridge.h>
58 #include <asm/machdep.h>
59 #include <asm/pmac_feature.h>
62 #include "sungem_phy.h"
65 #define DEFAULT_MSG (NETIF_MSG_DRV | \
69 #define DRV_NAME "sungem"
70 #define DRV_VERSION "0.97"
71 #define DRV_RELDATE "3/20/02"
72 #define DRV_AUTHOR "David S. Miller (davem@redhat.com)"
/* Driver identification banner printed at probe time. */
74 static char version[] __devinitdata =
75 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
77 MODULE_AUTHOR(DRV_AUTHOR);
78 MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
79 MODULE_LICENSE("GPL");
/* Bitmask of NETIF_MSG_* categories enabling debug output. */
81 MODULE_PARM(gem_debug, "i");
82 MODULE_PARM_DESC(gem_debug, "bitmapped message enable number");
/* NOTE(review): MODULE_PARM declares "link_mode" but the two
 * MODULE_PARM_DESC entries below describe "forced_speed" and
 * "forced_duplex" instead, and no MODULE_PARM is visible for either.
 * The descriptions therefore do not match any declared parameter —
 * confirm against the full file whether link_mode exists elsewhere
 * or whether forced_speed/forced_duplex were meant to be declared. */
83 MODULE_PARM(link_mode, "i");
84 MODULE_PARM_DESC(forced_speed, "force link speed (10,100,1000)");
85 MODULE_PARM_DESC(forced_duplex, "force link duplex (0: half, 1: full)");
/* -1 means "not forced"; used as defaults for link configuration. */
88 static int forced_speed = -1;
89 static int forced_duplex = -1;
91 #define GEM_MODULE_NAME "gem"
92 #define PFX GEM_MODULE_NAME ": "
/* PCI IDs this driver binds to: the original Sun GEM, the RIO GEM
 * (smaller FIFOs, 10/100 only) and three Apple UniNorth GMAC variants. */
94 static struct pci_device_id gem_pci_tbl[] __devinitdata = {
95 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
96 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
98 /* These models only differ from the original GEM in
99 * that their tx/rx fifos are of a different size and
100 * they only support 10/100 speeds. -DaveM
102 * Apple's GMAC does support gigabit on machines with
103 * the BCM54xx PHYs. -BenH
105 { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM,
106 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
107 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC,
108 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
109 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP,
110 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
111 { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2,
112 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
116 MODULE_DEVICE_TABLE(pci, gem_pci_tbl);
/* Issue an MDIO read frame to PHY @phy_addr, register @reg, via the
 * MIF frame register.  Returns the 16-bit register value (the low
 * MIF_FRAME_DATA bits of the completed frame).
 * NOTE(review): the frame setup and completion-poll loop are partially
 * elided here; TALSB going high appears to signal completion. */
118 static u16 __phy_read(struct gem *gp, int phy_addr, int reg)
125 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
126 cmd |= (reg << 18) & MIF_FRAME_REGAD;
127 cmd |= (MIF_FRAME_TAMSB);
128 writel(cmd, gp->regs + MIF_FRAME);
/* Poll the frame register until the hardware reports completion. */
131 cmd = readl(gp->regs + MIF_FRAME);
132 if (cmd & MIF_FRAME_TALSB)
141 return cmd & MIF_FRAME_DATA;
/* mii_if-style PHY read adapter: takes a net_device instead of a gem. */
144 static inline int _phy_read(struct net_device *dev, int mii_id, int reg)
146 struct gem *gp = dev->priv;
147 return __phy_read(gp, mii_id, reg);
/* Read @reg from the PHY address cached in gp->mii_phy_addr. */
150 static inline u16 phy_read(struct gem *gp, int reg)
152 return __phy_read(gp, gp->mii_phy_addr, reg);
/* Issue an MDIO write frame to PHY @phy_addr, register @reg, with data
 * @val, then poll the MIF frame register for completion (TALSB).
 * NOTE(review): the frame-command initialization and poll loop are
 * partially elided in this view. */
155 static void __phy_write(struct gem *gp, int phy_addr, int reg, u16 val)
162 cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD;
163 cmd |= (reg << 18) & MIF_FRAME_REGAD;
164 cmd |= (MIF_FRAME_TAMSB);
165 cmd |= (val & MIF_FRAME_DATA);
166 writel(cmd, gp->regs + MIF_FRAME);
/* Wait for the hardware to signal the frame has gone out. */
169 cmd = readl(gp->regs + MIF_FRAME);
170 if (cmd & MIF_FRAME_TALSB)
/* mii_if-style PHY write adapter; masks @val to 16 bits. */
177 static inline void _phy_write(struct net_device *dev, int mii_id, int reg, int val)
179 struct gem *gp = dev->priv;
180 __phy_write(gp, mii_id, reg, val & 0xffff);
/* Write @val to @reg on the PHY address cached in gp->mii_phy_addr. */
183 static inline void phy_write(struct gem *gp, int reg, u16 val)
185 __phy_write(gp, gp->mii_phy_addr, reg, val);
/* MIF event handler: currently only logs the interrupt when the
 * NETIF_MSG_INTR debug category is enabled; reg_val/changed_bits are
 * not otherwise acted upon here. */
188 static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits)
190 if (netif_msg_intr(gp))
191 printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name);
/* Service a PCS (physical coding sublayer) interrupt: decode the link
 * status change, update the carrier state, and report autoneg results.
 * Return value feeds gem_abnormal_irq's decision to reset the chip. */
194 static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
196 u32 pcs_istat = readl(gp->regs + PCS_ISTAT);
199 if (netif_msg_intr(gp))
200 printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n",
201 gp->dev->name, pcs_istat);
/* Only the link-status-change bit is expected here. */
203 if (!(pcs_istat & PCS_ISTAT_LSC)) {
204 printk(KERN_ERR "%s: PCS irq but no link status change???\n",
209 /* The link status bit latches on zero, so you must
210 * read it twice in such a case to see a transition
211 * to the link being up.
213 pcs_miistat = readl(gp->regs + PCS_MIISTAT);
214 if (!(pcs_miistat & PCS_MIISTAT_LS))
216 (readl(gp->regs + PCS_MIISTAT) &
219 if (pcs_miistat & PCS_MIISTAT_ANC) {
220 /* The remote-fault indication is only valid
221 * when autoneg has completed.
223 if (pcs_miistat & PCS_MIISTAT_RF)
224 printk(KERN_INFO "%s: PCS AutoNEG complete, "
225 "RemoteFault\n", dev->name);
227 printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
/* Propagate the new link state to the networking core. */
231 if (pcs_miistat & PCS_MIISTAT_LS) {
232 printk(KERN_INFO "%s: PCS link is now up.\n",
234 netif_carrier_on(gp->dev);
236 printk(KERN_INFO "%s: PCS link is now down.\n",
238 netif_carrier_off(gp->dev);
239 /* If this happens and the link timer is not running,
240 * reset so we re-negotiate.
242 if (!timer_pending(&gp->link_timer))
/* Service a TX MAC interrupt: account errors into net_stats.  The
 * 16-bit hardware counters are credited in 0x10000 chunks because an
 * interrupt fires on each counter wrap.  Return value signals whether
 * the caller should reset the whole chip. */
249 static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
251 u32 txmac_stat = readl(gp->regs + MAC_TXSTAT);
253 if (netif_msg_intr(gp))
254 printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
255 gp->dev->name, txmac_stat);
257 /* Defer timer expiration is quite normal,
258 * don't even log the event.
260 if ((txmac_stat & MAC_TXSTAT_DTE) &&
261 !(txmac_stat & ~MAC_TXSTAT_DTE))
264 if (txmac_stat & MAC_TXSTAT_URUN) {
265 printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
267 gp->net_stats.tx_fifo_errors++;
270 if (txmac_stat & MAC_TXSTAT_MPE) {
271 printk(KERN_ERR "%s: TX MAC max packet size error.\n",
273 gp->net_stats.tx_errors++;
276 /* The rest are all cases of one of the 16-bit TX
279 if (txmac_stat & MAC_TXSTAT_NCE)
280 gp->net_stats.collisions += 0x10000;
/* Excessive-collision counter wrapped. */
282 if (txmac_stat & MAC_TXSTAT_ECE) {
283 gp->net_stats.tx_aborted_errors += 0x10000;
284 gp->net_stats.collisions += 0x10000;
/* Late-collision counter wrapped. */
287 if (txmac_stat & MAC_TXSTAT_LCE) {
288 gp->net_stats.tx_aborted_errors += 0x10000;
289 gp->net_stats.collisions += 0x10000;
292 /* We do not keep track of MAC_TXSTAT_FCE and
293 * MAC_TXSTAT_PCE events.
298 /* When we get a RX fifo overflow, the RX unit in GEM is probably hung
299 * so we do the following.
301 * If any part of the reset goes wrong, we return 1 and that causes the
302 * whole chip to be reset.
/* Recover from an RX FIFO overflow by resetting only the RX side:
 * disable RX MAC, disable RX DMA, issue the RX software-reset command,
 * refresh the RX ring and reprogram the RX DMA/pause registers.
 * Returns nonzero if any step times out, in which case the caller
 * resets the whole chip instead. */
304 static int gem_rxmac_reset(struct gem *gp)
306 struct net_device *dev = gp->dev;
311 /* First, reset MAC RX. */
312 writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB,
313 gp->regs + MAC_RXCFG);
314 for (limit = 0; limit < 5000; limit++) {
315 if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB))
320 printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
321 "chip.\n", dev->name);
325 /* Second, disable RX DMA. */
326 writel(0, gp->regs + RXDMA_CFG);
327 for (limit = 0; limit < 5000; limit++) {
328 if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE))
333 printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
334 "chip.\n", dev->name);
340 /* Execute RX reset command. */
341 writel(gp->swrst_base | GREG_SWRST_RXRST,
342 gp->regs + GREG_SWRST);
343 for (limit = 0; limit < 5000; limit++) {
344 if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST))
349 printk(KERN_ERR "%s: RX reset command will not execute, resetting "
350 "whole chip.\n", dev->name);
354 /* Refresh the RX ring. */
355 for (i = 0; i < RX_RING_SIZE; i++) {
356 struct gem_rxd *rxd = &gp->init_block->rxd[i];
358 if (gp->rx_skbs[i] == NULL) {
359 printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
360 "whole chip.\n", dev->name);
/* Hand every descriptor back to the hardware. */
364 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
366 gp->rx_new = gp->rx_old = 0;
368 /* Now we must reprogram the rest of RX unit. */
/* RX descriptors live right after the TX ring in the init block. */
369 desc_dma = (u64) gp->gblock_dvma;
370 desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
371 writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
372 writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
373 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
374 val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
375 ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
376 writel(val, gp->regs + RXDMA_CFG);
/* Interrupt-blanking: longer interval on 66MHz PCI. */
377 if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
378 writel(((5 & RXDMA_BLANK_IPKTS) |
379 ((8 << 12) & RXDMA_BLANK_ITIME)),
380 gp->regs + RXDMA_BLANK);
382 writel(((5 & RXDMA_BLANK_IPKTS) |
383 ((4 << 12) & RXDMA_BLANK_ITIME)),
384 gp->regs + RXDMA_BLANK);
/* Pause thresholds are programmed in units of 64 bytes. */
385 val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
386 val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
387 writel(val, gp->regs + RXDMA_PTHRESH);
388 val = readl(gp->regs + RXDMA_CFG);
389 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
390 writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
391 val = readl(gp->regs + MAC_RXCFG);
392 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
/* Service an RX MAC interrupt: on FIFO overflow attempt the targeted
 * RX-side reset (gem_rxmac_reset) and otherwise credit the wrapped
 * 16-bit hardware error counters into net_stats. */
397 static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
399 u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT);
402 if (netif_msg_intr(gp))
403 printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n",
404 gp->dev->name, rxmac_stat);
406 if (rxmac_stat & MAC_RXSTAT_OFLW) {
407 gp->net_stats.rx_over_errors++;
408 gp->net_stats.rx_fifo_errors++;
/* ret nonzero => caller escalates to a full chip reset. */
410 ret = gem_rxmac_reset(gp);
413 if (rxmac_stat & MAC_RXSTAT_ACE)
414 gp->net_stats.rx_frame_errors += 0x10000;
416 if (rxmac_stat & MAC_RXSTAT_CCE)
417 gp->net_stats.rx_crc_errors += 0x10000;
419 if (rxmac_stat & MAC_RXSTAT_LCE)
420 gp->net_stats.rx_length_errors += 0x10000;
422 /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
/* Service a MAC control-status interrupt: only pause-frame events are
 * reported here, used for diagnostics; the receive-pause case records
 * the pause time from the upper half of the status register. */
428 static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
430 u32 mac_cstat = readl(gp->regs + MAC_CSTAT);
432 if (netif_msg_intr(gp))
433 printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n",
434 gp->dev->name, mac_cstat);
436 /* This interrupt is just for pause frame and pause
437 * tracking. It is useful for diagnostics and debug
438 * but probably by default we will mask these events.
440 if (mac_cstat & MAC_CSTAT_PS)
443 if (mac_cstat & MAC_CSTAT_PRCV)
444 gp->pause_last_time_recvd = (mac_cstat >> 16);
/* Service a MIF interrupt: extract the polled-register data and the
 * changed-bit mask from MIF_STATUS and forward to the event handler. */
449 static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
451 u32 mif_status = readl(gp->regs + MIF_STATUS);
452 u32 reg_val, changed_bits;
454 reg_val = (mif_status & MIF_STATUS_DATA) >> 16;
455 changed_bits = (mif_status & MIF_STATUS_STAT);
457 gem_handle_mif_event(gp, reg_val, changed_bits);
/* Service a PCI-error interrupt: on the original Sun GEM decode the
 * detailed GREG_PCIESTAT bits; on other chips (which lack the detailed
 * register) treat any event as "other".  For the "other" case the PCI
 * config-space status word is interrogated, logged, and written back
 * to clear the error bits.  All PCI errors lead to a chip reset. */
462 static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status)
464 u32 pci_estat = readl(gp->regs + GREG_PCIESTAT);
466 if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
467 gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
468 printk(KERN_ERR "%s: PCI error [%04x] ",
469 dev->name, pci_estat);
471 if (pci_estat & GREG_PCIESTAT_BADACK)
472 printk("<No ACK64# during ABS64 cycle> ");
473 if (pci_estat & GREG_PCIESTAT_DTRTO)
474 printk("<Delayed transaction timeout> ");
475 if (pci_estat & GREG_PCIESTAT_OTHER)
/* Non-GEM chips: force the "other" path below. */
479 pci_estat |= GREG_PCIESTAT_OTHER;
480 printk(KERN_ERR "%s: PCI error\n", dev->name);
483 if (pci_estat & GREG_PCIESTAT_OTHER) {
486 /* Interrogate PCI config space for the
489 pci_read_config_word(gp->pdev, PCI_STATUS,
491 printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
492 dev->name, pci_cfg_stat);
493 if (pci_cfg_stat & PCI_STATUS_PARITY)
494 printk(KERN_ERR "%s: PCI parity error detected.\n",
496 if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
497 printk(KERN_ERR "%s: PCI target abort.\n",
499 if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
500 printk(KERN_ERR "%s: PCI master acks target abort.\n",
502 if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
503 printk(KERN_ERR "%s: PCI master abort.\n",
505 if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
506 printk(KERN_ERR "%s: PCI system error SERR#.\n",
508 if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
509 printk(KERN_ERR "%s: PCI parity error.\n",
512 /* Write the error bits back to clear them. */
513 pci_cfg_stat &= (PCI_STATUS_PARITY |
514 PCI_STATUS_SIG_TARGET_ABORT |
515 PCI_STATUS_REC_TARGET_ABORT |
516 PCI_STATUS_REC_MASTER_ABORT |
517 PCI_STATUS_SIG_SYSTEM_ERROR |
518 PCI_STATUS_DETECTED_PARITY);
519 pci_write_config_word(gp->pdev,
520 PCI_STATUS, pci_cfg_stat);
523 /* For all PCI errors, we should reset the chip. */
527 /* All non-normal interrupt conditions get serviced here.
528 * Returns non-zero if we should just exit the interrupt
529 * handler right now (ie. if we reset the card which invalidates
530 * all of the other original irq status bits).
/* Dispatch all non-normal interrupt status bits to their handlers.
 * If any handler requests it, schedule the reset task and return
 * nonzero so the top-level handler bails out (the old status bits are
 * meaningless after a reset). */
532 static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status)
534 if (gem_status & GREG_STAT_RXNOBUF) {
535 /* Frame arrived, no free RX buffers available. */
536 if (netif_msg_rx_err(gp))
537 printk(KERN_DEBUG "%s: no buffer for rx frame\n",
539 gp->net_stats.rx_dropped++;
542 if (gem_status & GREG_STAT_RXTAGERR) {
543 /* corrupt RX tag framing */
544 if (netif_msg_rx_err(gp))
545 printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
547 gp->net_stats.rx_errors++;
552 if (gem_status & GREG_STAT_PCS) {
553 if (gem_pcs_interrupt(dev, gp, gem_status))
557 if (gem_status & GREG_STAT_TXMAC) {
558 if (gem_txmac_interrupt(dev, gp, gem_status))
562 if (gem_status & GREG_STAT_RXMAC) {
563 if (gem_rxmac_interrupt(dev, gp, gem_status))
567 if (gem_status & GREG_STAT_MAC) {
568 if (gem_mac_interrupt(dev, gp, gem_status))
572 if (gem_status & GREG_STAT_MIF) {
573 if (gem_mif_interrupt(dev, gp, gem_status))
577 if (gem_status & GREG_STAT_PCIERR) {
578 if (gem_pci_interrupt(dev, gp, gem_status))
/* 2 == full reset requested; handled by gem_reset_task. */
585 gp->reset_task_pending = 2;
586 schedule_task(&gp->reset_task);
/* TX completion: walk the ring from tx_old up to the hardware's
 * completion index (encoded in gem_status), unmap each descriptor,
 * free the skbs, and wake the queue if enough room opened up.
 * Called with gp->lock held from the interrupt handler. */
591 static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status)
595 if (netif_msg_intr(gp))
596 printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n",
597 gp->dev->name, gem_status);
600 limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT);
601 while (entry != limit) {
608 if (netif_msg_tx_done(gp))
609 printk(KERN_DEBUG "%s: tx done, slot %d\n",
610 gp->dev->name, entry);
611 skb = gp->tx_skbs[entry];
/* Fragmented skb: make sure all of its descriptors completed
 * before freeing anything. */
612 if (skb_shinfo(skb)->nr_frags) {
613 int last = entry + skb_shinfo(skb)->nr_frags;
617 last &= (TX_RING_SIZE - 1);
619 walk = NEXT_TX(walk);
628 gp->tx_skbs[entry] = NULL;
629 gp->net_stats.tx_bytes += skb->len;
/* nr_frags + 1 descriptors per skb (head chunk + each frag). */
631 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
632 txd = &gp->init_block->txd[entry];
634 dma_addr = le64_to_cpu(txd->buffer);
635 dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;
637 pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
638 entry = NEXT_TX(entry);
641 gp->net_stats.tx_packets++;
642 dev_kfree_skb_irq(skb);
646 if (netif_queue_stopped(dev) &&
647 TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))
648 netif_wake_queue(dev);
/* Return RX descriptors up to @limit to the hardware.  Descriptors are
 * refreshed in aligned clusters of 4 (rx_new is rounded down to a
 * multiple of 4) and the RXDMA kick register is written only once. */
651 static __inline__ void gem_post_rxds(struct gem *gp, int limit)
653 int cluster_start, curr, count, kick;
655 cluster_start = curr = (gp->rx_new & ~(4 - 1));
658 while (curr != limit) {
659 curr = NEXT_RX(curr);
661 struct gem_rxd *rxd =
662 &gp->init_block->rxd[cluster_start];
/* Hand ownership of the cluster back to the chip. */
664 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
666 cluster_start = NEXT_RX(cluster_start);
667 if (cluster_start == curr)
675 writel(kick, gp->regs + RXDMA_KICK);
/* RX path: walk the ring from rx_new, passing completed frames to the
 * stack.  Large frames (> RX_COPY_THRESHOLD) are handed up directly
 * and the ring slot is refilled with a fresh skb; small frames are
 * copied into a new skb so the original ring buffer can be reused.
 * Called with gp->lock held from the interrupt handler. */
678 static void gem_rx(struct gem *gp)
682 if (netif_msg_intr(gp))
683 printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
684 gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new);
689 struct gem_rxd *rxd = &gp->init_block->rxd[entry];
/* NOTE(review): cpu_to_le64() on a value READ from the descriptor
 * looks semantically reversed — le64_to_cpu() is the correct
 * direction (as used in gem_tx above).  Harmless on hosts where the
 * swap is involutive, but worth confirming/fixing. */
691 u64 status = cpu_to_le64(rxd->status_word);
/* Chip still owns this descriptor: stop here. */
695 if ((status & RXDCTRL_OWN) != 0)
698 skb = gp->rx_skbs[entry];
700 len = (status & RXDCTRL_BUFSZ) >> 16;
701 if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
702 gp->net_stats.rx_errors++;
704 gp->net_stats.rx_length_errors++;
705 if (len & RXDCTRL_BAD)
706 gp->net_stats.rx_crc_errors++;
708 /* We'll just return it to GEM. */
710 gp->net_stats.rx_dropped++;
/* NOTE(review): same reversed conversion direction as above. */
714 dma_addr = cpu_to_le64(rxd->buffer);
715 if (len > RX_COPY_THRESHOLD) {
716 struct sk_buff *new_skb;
/* Refill the slot before handing the old skb upstream. */
718 new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
719 if (new_skb == NULL) {
723 pci_unmap_page(gp->pdev, dma_addr,
724 RX_BUF_ALLOC_SIZE(gp),
726 gp->rx_skbs[entry] = new_skb;
727 new_skb->dev = gp->dev;
728 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET));
729 rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
730 virt_to_page(new_skb->data),
731 ((unsigned long) new_skb->data &
733 RX_BUF_ALLOC_SIZE(gp),
734 PCI_DMA_FROMDEVICE));
735 skb_reserve(new_skb, RX_OFFSET);
737 /* Trim the original skb for the netif. */
740 struct sk_buff *copy_skb = dev_alloc_skb(len + 2);
742 if (copy_skb == NULL) {
747 copy_skb->dev = gp->dev;
748 skb_reserve(copy_skb, 2);
749 skb_put(copy_skb, len);
750 pci_dma_sync_single(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
751 memcpy(copy_skb->data, skb->data, len);
753 /* We'll reuse the original ring buffer. */
/* Hardware computed the TCP checksum; hand it to the stack. */
757 skb->csum = ntohs((status & RXDCTRL_TCPCSUM) ^ 0xffff);
758 skb->ip_summed = CHECKSUM_HW;
759 skb->protocol = eth_type_trans(skb, gp->dev);
762 gp->net_stats.rx_packets++;
763 gp->net_stats.rx_bytes += len;
764 gp->dev->last_rx = jiffies;
767 entry = NEXT_RX(entry);
770 gem_post_rxds(gp, entry);
775 printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
/* Top-level interrupt handler: under gp->lock, dispatch abnormal
 * conditions first (bailing out if a reset was scheduled), then the
 * TX-completion and RX paths. */
779 static void gem_interrupt(int irq, void *dev_id, struct pt_regs *regs)
781 struct net_device *dev = dev_id;
782 struct gem *gp = dev->priv;
783 u32 gem_status = readl(gp->regs + GREG_STAT);
785 spin_lock(&gp->lock);
787 if (gem_status & GREG_STAT_ABNORMAL) {
788 if (gem_abnormal_irq(dev, gp, gem_status))
791 if (gem_status & (GREG_STAT_TXALL | GREG_STAT_TXINTME))
792 gem_tx(dev, gp, gem_status);
793 if (gem_status & GREG_STAT_RXDONE)
797 spin_unlock(&gp->lock);
/* net_device tx_timeout hook: dump TX/RX DMA and MAC state for
 * debugging, then schedule a full chip reset via the reset task. */
800 static void gem_tx_timeout(struct net_device *dev)
802 struct gem *gp = dev->priv;
804 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
805 if (!gp->hw_running) {
806 printk("%s: hrm.. hw not running !\n", dev->name);
809 printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
811 readl(gp->regs + TXDMA_CFG),
812 readl(gp->regs + MAC_TXSTAT),
813 readl(gp->regs + MAC_TXCFG));
814 printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
816 readl(gp->regs + RXDMA_CFG),
817 readl(gp->regs + MAC_RXSTAT),
818 readl(gp->regs + MAC_RXCFG));
820 spin_lock_irq(&gp->lock);
/* 2 == full reset requested; handled by gem_reset_task. */
822 gp->reset_task_pending = 2;
823 schedule_task(&gp->reset_task);
825 spin_unlock_irq(&gp->lock);
/* Decide whether descriptor @entry should carry TXDCTRL_INTME:
 * request a TX-complete interrupt once per half ring. */
828 static __inline__ int gem_intme(int entry)
830 /* Algorithm: IRQ every 1/2 of descriptors. */
831 if (!(entry & ((TX_RING_SIZE>>1)-1)))
/* net_device hard_start_xmit hook: map the skb (head chunk plus any
 * page fragments) into TX descriptors, program hardware checksum
 * offload when requested, and kick the TX DMA engine.  For fragmented
 * skbs the first descriptor is filled in LAST so the chip never sees
 * a partially-built chain. */
837 static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
839 struct gem *gp = dev->priv;
/* Encode checksum start/stuff offsets for hardware csum offload. */
844 if (skb->ip_summed == CHECKSUM_HW) {
845 u64 csum_start_off, csum_stuff_off;
847 csum_start_off = (u64) (skb->h.raw - skb->data);
848 csum_stuff_off = (u64) ((skb->h.raw + skb->csum) - skb->data);
850 ctrl = (TXDCTRL_CENAB |
851 (csum_start_off << 15) |
852 (csum_stuff_off << 21));
855 spin_lock_irq(&gp->lock);
857 /* This is a hard error, log it. */
858 if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
859 netif_stop_queue(dev);
860 spin_unlock_irq(&gp->lock);
861 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
867 gp->tx_skbs[entry] = skb;
/* Linear skb: a single SOF+EOF descriptor suffices. */
869 if (skb_shinfo(skb)->nr_frags == 0) {
870 struct gem_txd *txd = &gp->init_block->txd[entry];
875 mapping = pci_map_page(gp->pdev,
876 virt_to_page(skb->data),
877 ((unsigned long) skb->data &
879 len, PCI_DMA_TODEVICE);
880 ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
881 if (gem_intme(entry))
882 ctrl |= TXDCTRL_INTME;
883 txd->buffer = cpu_to_le64(mapping);
884 txd->control_word = cpu_to_le64(ctrl);
885 entry = NEXT_TX(entry);
890 dma_addr_t first_mapping;
891 int frag, first_entry = entry;
894 if (gem_intme(entry))
895 intme |= TXDCTRL_INTME;
897 /* We must give this initial chunk to the device last.
898 * Otherwise we could race with the device.
900 first_len = skb->len - skb->data_len;
901 first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
902 ((unsigned long) skb->data & ~PAGE_MASK),
903 first_len, PCI_DMA_TODEVICE);
904 entry = NEXT_TX(entry);
906 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
907 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
912 len = this_frag->size;
913 mapping = pci_map_page(gp->pdev,
915 this_frag->page_offset,
916 len, PCI_DMA_TODEVICE);
/* Last fragment closes the frame. */
918 if (frag == skb_shinfo(skb)->nr_frags - 1)
919 this_ctrl |= TXDCTRL_EOF;
921 txd = &gp->init_block->txd[entry];
922 txd->buffer = cpu_to_le64(mapping);
923 txd->control_word = cpu_to_le64(this_ctrl | len);
925 if (gem_intme(entry))
926 intme |= TXDCTRL_INTME;
928 entry = NEXT_TX(entry);
/* Now publish the first (SOF) descriptor, completing the chain. */
930 txd = &gp->init_block->txd[first_entry];
931 txd->buffer = cpu_to_le64(first_mapping);
933 cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
/* Stop the queue if we can't fit a maximally-fragmented skb. */
937 if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))
938 netif_stop_queue(dev);
940 if (netif_msg_tx_queued(gp))
941 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
942 dev->name, entry, skb->len);
943 writel(gp->tx_new, gp->regs + TXDMA_KICK);
944 spin_unlock_irq(&gp->lock);
946 dev->trans_start = jiffies;
951 /* Jumbo-grams don't seem to work :-( */
952 #define GEM_MIN_MTU 68
954 #define GEM_MAX_MTU 1500
956 #define GEM_MAX_MTU 9000
/* net_device change_mtu hook: validate the new MTU against
 * GEM_MIN_MTU/GEM_MAX_MTU.  If the interface is down the change is
 * picked up on the next open/resume; otherwise schedule the reset
 * task (mode 1) and wait for it to complete. */
959 static int gem_change_mtu(struct net_device *dev, int new_mtu)
961 struct gem *gp = dev->priv;
963 if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU)
966 if (!netif_running(dev) || !netif_device_present(dev)) {
967 /* We'll just catch it later when the
968 * device is up'd or resumed.
974 spin_lock_irq(&gp->lock);
/* 1 == reinit-with-new-settings; handled by gem_reset_task. */
976 gp->reset_task_pending = 1;
977 schedule_task(&gp->reset_task);
978 spin_unlock_irq(&gp->lock);
/* Block until the reset task has actually run. */
980 flush_scheduled_tasks();
985 #define STOP_TRIES 32
987 /* Must be invoked under gp->lock. */
988 static void gem_stop(struct gem *gp)
993 /* Make sure we won't get any more interrupts */
994 writel(0xffffffff, gp->regs + GREG_IMASK);
997 writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST,
998 gp->regs + GREG_SWRST);
1004 val = readl(gp->regs + GREG_SWRST);
1007 } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
1010 printk(KERN_ERR "gem: SW reset is ghetto.\n");
1013 /* Must be invoked under gp->lock. */
/* Enable TX/RX DMA and the TX/RX MACs, unmask the interrupts we care
 * about, and kick the RX ring so the chip starts receiving. */
1014 static void gem_start_dma(struct gem *gp)
1018 /* We are ready to rock, turn everything on. */
1019 val = readl(gp->regs + TXDMA_CFG);
1020 writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
1021 val = readl(gp->regs + RXDMA_CFG);
1022 writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG);
1023 val = readl(gp->regs + MAC_TXCFG);
1024 writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG);
1025 val = readl(gp->regs + MAC_RXCFG);
1026 writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
/* Read back to flush the posted writes before enabling IRQs. */
1028 (void) readl(gp->regs + MAC_RXCFG);
1031 writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK);
1033 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
1038 /* Must be invoked under gp->lock. */
1039 // XXX dbl check what that function should do when called on PCS PHY
/* Derive the advertise mask, autoneg flag and (if forced) speed/duplex
 * from either the current PHY state or the caller-supplied ethtool
 * command @ep (NULL means "use current settings"), then either record
 * them (hardware not running) or program the PHY and restart the link
 * timer. */
1040 static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
1047 /* Default advertise */
1048 advertise = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1049 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
1050 if (gp->gigabit_capable)
1051 advertise |= ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full;
1052 autoneg = gp->want_autoneg;
/* NOTE(review): these assignments clobber the module-level
 * forced_speed/forced_duplex statics (the module parameters'
 * backing store), using them as per-call scratch — confirm this
 * reuse is intentional rather than shadowed locals lost to elision. */
1053 forced_speed = gp->phy_mii.speed;
1054 forced_duplex = gp->phy_mii.duplex;
1056 /* Setup link parameters */
1059 if (ep->autoneg == AUTONEG_ENABLE) {
1060 advertise = ep->advertising;
1064 forced_speed = ep->speed;
1065 forced_duplex = ep->duplex;
/* Hardware off: just remember the settings for later. */
1069 if (!gp->hw_running) {
1070 gp->phy_mii.autoneg = gp->want_autoneg = autoneg;
1071 gp->phy_mii.speed = forced_speed;
1072 gp->phy_mii.duplex = forced_duplex;
1076 /* Configure PHY & start aneg */
1077 gp->want_autoneg = autoneg;
1079 if (found_mii_phy(gp))
1080 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise);
1081 gp->lstate = link_aneg;
1083 if (found_mii_phy(gp))
1084 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, forced_speed,
1086 gp->lstate = link_force_ok;
/* Re-arm the link poll timer (1.2s). */
1089 gp->timer_ticks = 0;
1090 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
1093 /* A link-up condition has occurred, initialize and enable the
1096 * Must be invoked under gp->lock.
/* Read the negotiated speed/duplex/pause (from the MII PHY or, for
 * serialink/serdes, the PCS registers) and program the TX MAC, XIF,
 * carrier-extension, slot-time and flow-control registers to match. */
1098 static int gem_set_link_modes(struct gem *gp)
1101 int full_duplex, speed, pause;
1107 if (found_mii_phy(gp)) {
1108 if (gp->phy_mii.def->ops->read_link(&gp->phy_mii))
1110 full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL);
1111 speed = gp->phy_mii.speed;
1112 pause = gp->phy_mii.pause;
1113 } else if (gp->phy_type == phy_serialink ||
1114 gp->phy_type == phy_serdes) {
1115 u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
1117 if (pcs_lpa & PCS_MIIADV_FD)
1122 if (netif_msg_link(gp))
1123 printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
1124 gp->dev->name, speed, (full_duplex ? "full" : "half"));
1126 val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU);
/* Half-duplex: enable carrier-sense / collision handling. */
1128 val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
1130 /* MAC_TXCFG_NBO must be zero. */
1132 writel(val, gp->regs + MAC_TXCFG);
1134 val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
1136 (gp->phy_type == phy_mii_mdio0 ||
1137 gp->phy_type == phy_mii_mdio1)) {
1138 val |= MAC_XIFCFG_DISE;
1139 } else if (full_duplex) {
1140 val |= MAC_XIFCFG_FLED;
1143 if (speed == SPEED_1000)
1144 val |= (MAC_XIFCFG_GMII);
1146 writel(val, gp->regs + MAC_XIFCFG);
1148 /* If gigabit and half-duplex, enable carrier extension
1149 * mode. Else, disable it.
1151 if (speed == SPEED_1000 && !full_duplex) {
1152 val = readl(gp->regs + MAC_TXCFG);
1153 writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
1155 val = readl(gp->regs + MAC_RXCFG);
1156 writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
1158 val = readl(gp->regs + MAC_TXCFG);
1159 writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG);
1161 val = readl(gp->regs + MAC_RXCFG);
1162 writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG);
/* For PCS-based links, read pause capability from link partner. */
1165 if (gp->phy_type == phy_serialink ||
1166 gp->phy_type == phy_serdes) {
1167 u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
1169 if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP))
1173 if (netif_msg_link(gp)) {
1175 printk(KERN_INFO "%s: Pause is enabled "
1176 "(rxfifo: %d off: %d on: %d)\n",
1182 printk(KERN_INFO "%s: Pause is disabled\n",
/* Slot time: 512 for gigabit carrier-extension, 64 otherwise. */
1188 writel(512, gp->regs + MAC_STIME);
1190 writel(64, gp->regs + MAC_STIME);
1191 val = readl(gp->regs + MAC_MCCFG);
1193 val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE);
1195 val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE);
1196 writel(val, gp->regs + MAC_MCCFG);
1203 /* Must be invoked under gp->lock. */
/* Fallback ladder when autoneg fails to bring the link up:
 * retried-autoneg failure -> back to the last forced mode;
 * autoneg timeout -> force 100Mbit; forced 100 failing -> force 10.
 * Returns whether the caller should restart autonegotiation. */
1204 static int gem_mdio_link_not_up(struct gem *gp)
1206 if (gp->lstate == link_force_ret) {
1207 if (netif_msg_link(gp))
1208 printk(KERN_INFO "%s: Autoneg failed again, keeping"
1209 " forced mode\n", gp->dev->name);
1210 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
1211 gp->last_forced_speed, DUPLEX_HALF);
1212 gp->timer_ticks = 5;
1213 gp->lstate = link_force_ok;
1214 } else if (gp->lstate == link_aneg) {
1215 if (netif_msg_link(gp))
1216 printk(KERN_INFO "%s: switching to forced 100bt\n",
1218 /* Try forced modes. */
1219 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
1221 gp->timer_ticks = 5;
1222 gp->lstate = link_force_try;
1224 /* Downgrade from 100 to 10 Mbps if necessary.
1225 * If already at 10Mbps, warn user about the
1226 * situation every 10 ticks.
1228 if (gp->phy_mii.speed == SPEED_100) {
1229 gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
1231 gp->timer_ticks = 5;
1232 if (netif_msg_link(gp))
1233 printk(KERN_INFO "%s: switching to forced 10bt\n",
1241 static void gem_init_rings(struct gem *);
1242 static void gem_init_hw(struct gem *, int);
/* Deferred reset handler (scheduled via schedule_task).  With the
 * device lock held, quiesce the queue and IRQs, reset the chip and
 * rings, then wake the queue and clear reset_task_pending.
 * reset_task_pending == 2 requests the fuller reset variant. */
1244 static void gem_reset_task(void *data)
1246 struct gem *gp = (struct gem *) data;
1248 /* The link went down, we reset the ring, but keep
1249 * DMA stopped. Todo: Use this function for reset
1253 spin_lock_irq(&gp->lock);
1255 if (gp->hw_running && gp->opened) {
1256 /* Make sure we don't get interrupts or tx packets */
1257 netif_stop_queue(gp->dev);
1259 writel(0xffffffff, gp->regs + GREG_IMASK);
1261 /* Reset the chip & rings */
1266 (gp->reset_task_pending == 2));
1268 netif_wake_queue(gp->dev);
1270 gp->reset_task_pending = 0;
1272 spin_unlock_irq(&gp->lock);
/* Periodic (1.2s) link-state poll.  For PCS links, read link status
 * directly; for MII PHYs, poll the PHY, handle the forced-fallback /
 * autoneg-retry state machine, and schedule a reset when an
 * established link drops.  Re-arms itself unless autoneg restarts. */
1275 static void gem_link_timer(unsigned long data)
1277 struct gem *gp = (struct gem *) data;
1278 int restart_aneg = 0;
1280 if (!gp->hw_running)
1283 spin_lock_irq(&gp->lock);
1285 /* If the link of task is still pending, we just
1286 * reschedule the link timer
1288 if (gp->reset_task_pending)
1291 if (gp->phy_type == phy_serialink ||
1292 gp->phy_type == phy_serdes) {
1293 u32 val = readl(gp->regs + PCS_MIISTAT);
/* Link-status bit latches low: re-read to see an up transition. */
1295 if (!(val & PCS_MIISTAT_LS))
1296 val = readl(gp->regs + PCS_MIISTAT);
1298 if ((val & PCS_MIISTAT_LS) != 0) {
1299 gp->lstate = link_up;
1300 netif_carrier_on(gp->dev);
1302 (void)gem_set_link_modes(gp);
1306 if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) {
1307 /* Ok, here we got a link. If we had it due to a forced
1308 * fallback, and we were configured for autoneg, we do
1309 * retry a short autoneg pass. If you know your hub is
1310 * broken, use ethtool ;)
1312 if (gp->lstate == link_force_try && gp->want_autoneg) {
1313 gp->lstate = link_force_ret;
1314 gp->last_forced_speed = gp->phy_mii.speed;
1315 gp->timer_ticks = 5;
1316 if (netif_msg_link(gp))
1317 printk(KERN_INFO "%s: Got link after fallback, retrying"
1318 " autoneg once...\n", gp->dev->name);
1319 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
1320 } else if (gp->lstate != link_up) {
1321 gp->lstate = link_up;
1322 netif_carrier_on(gp->dev);
1323 if (gp->opened && gem_set_link_modes(gp))
1327 /* If the link was previously up, we restart the
1330 if (gp->lstate == link_up) {
1331 gp->lstate = link_down;
1332 if (netif_msg_link(gp))
1333 printk(KERN_INFO "%s: Link down\n",
1335 netif_carrier_off(gp->dev);
1336 gp->reset_task_pending = 2;
1337 schedule_task(&gp->reset_task);
/* No link for >10 ticks: run the MDIO fallback ladder. */
1339 } else if (++gp->timer_ticks > 10) {
1340 if (found_mii_phy(gp))
1341 restart_aneg = gem_mdio_link_not_up(gp);
1347 gem_begin_auto_negotiation(gp, NULL);
1351 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10));
1353 spin_unlock_irq(&gp->lock);
1356 /* Must be invoked under gp->lock. */
/* Tear down both rings: unmap and free every RX skb, then unmap each
 * TX skb's head+fragment descriptors (freeing the skb once after its
 * last descriptor) and clear the descriptor words. */
1357 static void gem_clean_rings(struct gem *gp)
1359 struct gem_init_block *gb = gp->init_block;
1360 struct sk_buff *skb;
1362 dma_addr_t dma_addr;
1364 for (i = 0; i < RX_RING_SIZE; i++) {
1365 struct gem_rxd *rxd;
1368 if (gp->rx_skbs[i] != NULL) {
1369 skb = gp->rx_skbs[i];
1370 dma_addr = le64_to_cpu(rxd->buffer);
1371 pci_unmap_page(gp->pdev, dma_addr,
1372 RX_BUF_ALLOC_SIZE(gp),
1373 PCI_DMA_FROMDEVICE);
1374 dev_kfree_skb_any(skb);
1375 gp->rx_skbs[i] = NULL;
1377 rxd->status_word = 0;
1381 for (i = 0; i < TX_RING_SIZE; i++) {
1382 if (gp->tx_skbs[i] != NULL) {
1383 struct gem_txd *txd;
1386 skb = gp->tx_skbs[i];
1387 gp->tx_skbs[i] = NULL;
/* nr_frags + 1 descriptors per skb (head chunk + frags). */
1389 for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
1390 int ent = i & (TX_RING_SIZE - 1);
1392 txd = &gb->txd[ent];
1393 dma_addr = le64_to_cpu(txd->buffer);
1394 pci_unmap_page(gp->pdev, dma_addr,
1395 le64_to_cpu(txd->control_word) &
1396 TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);
/* Only free the skb once, after its final descriptor. */
1398 if (frag != skb_shinfo(skb)->nr_frags)
1401 dev_kfree_skb_any(skb);
1406 /* Must be invoked under gp->lock. */
/* (Re)initialize both rings: clean any existing state, allocate and
 * DMA-map a fresh skb for every RX descriptor (marking it FRESH so
 * the chip owns it), and zero every TX descriptor. */
1407 static void gem_init_rings(struct gem *gp)
1409 struct gem_init_block *gb = gp->init_block;
1410 struct net_device *dev = gp->dev;
1412 dma_addr_t dma_addr;
1414 gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0;
1416 gem_clean_rings(gp);
1418 for (i = 0; i < RX_RING_SIZE; i++) {
1419 struct sk_buff *skb;
1420 struct gem_rxd *rxd = &gb->rxd[i];
1422 skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC);
/* Allocation failure: leave the slot empty (status 0). */
1425 rxd->status_word = 0;
1429 gp->rx_skbs[i] = skb;
1431 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET));
1432 dma_addr = pci_map_page(gp->pdev,
1433 virt_to_page(skb->data),
1434 ((unsigned long) skb->data &
1436 RX_BUF_ALLOC_SIZE(gp),
1437 PCI_DMA_FROMDEVICE);
1438 rxd->buffer = cpu_to_le64(dma_addr);
1439 rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
1440 skb_reserve(skb, RX_OFFSET);
1443 for (i = 0; i < TX_RING_SIZE; i++) {
1444 struct gem_txd *txd = &gb->txd[i];
1446 txd->control_word = 0;
1451 /* Must be invoked under gp->lock. */
/* Bring up the PHY / PCS for this chip. For Apple GMACs this includes a
 * firmware-assisted PHY reset and a probe over all 32 MDIO addresses; for
 * MII PHYs it runs the generic mii_phy probe/init; for SERDES/Serialink it
 * resets and configures the on-chip PCS. Caller holds gp->lock.
 * NOTE(review): excerpt is elided (embedded numbering shows gaps).
 */
1452 static void gem_init_phy(struct gem *gp)
1456 	/* Revert MIF CFG setting done on stop_phy */
1457 	mifcfg = readl(gp->regs + MIF_CFG);
1458 	mifcfg &= ~MIF_CFG_BBMODE;
1459 	writel(mifcfg, gp->regs + MIF_CFG);
1461 #ifdef CONFIG_ALL_PPC
1462 	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
1465 		/* Those delay sucks, the HW seem to love them though, I'll
1466 		 * serisouly consider breaking some locks here to be able
1467 		 * to schedule instead
1469 		pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 0);
1471 		for (j = 0; j < 3; j++) {
1472 			/* Some PHYs used by apple have problem getting back to us,
1473 			 * we _know_ it's actually at addr 0, that's a hack, but
1474 			 * it helps to do that reset now. I suspect some motherboards
1475 			 * don't wire the PHY reset line properly, thus the PHY doesn't
1476 			 * come back with the above pmac_call_feature.
1478 			gp->mii_phy_addr = 0;
1479 			phy_write(gp, MII_BMCR, BMCR_RESET);
1480 			/* We should probably break some locks here and schedule... */
1482 			for (i = 0; i < 32; i++) {
1483 				gp->mii_phy_addr = i;
1484 				if (phy_read(gp, MII_BMCR) != 0xffff)
1488 		printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
1490 		gp->mii_phy_addr = 0;
1495 #endif /* CONFIG_ALL_PPC */
1497 	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
1498 	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
1501 		/* Init datapath mode register. */
1502 		if (gp->phy_type == phy_mii_mdio0 ||
1503 		    gp->phy_type == phy_mii_mdio1) {
1504 			val = PCS_DMODE_MGM;
1505 		} else if (gp->phy_type == phy_serialink) {
1506 			val = PCS_DMODE_SM | PCS_DMODE_GMOE;
1508 			val = PCS_DMODE_ESM;
1511 		writel(val, gp->regs + PCS_DMODE);
1514 	if (gp->phy_type == phy_mii_mdio0 ||
1515 	    gp->phy_type == phy_mii_mdio1) {
1516 		// XXX check for errors
1517 		mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr);
/* Gigabit only if the probed PHY advertises a 1000baseT mode. */
1519 		gp->gigabit_capable = gp->phy_mii.def && (gp->phy_mii.def->features &
1520 			(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
1522 		if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
1523 			gp->phy_mii.def->ops->init(&gp->phy_mii);
1528 		/* Reset PCS unit. */
1529 		val = readl(gp->regs + PCS_MIICTRL);
1530 		val |= PCS_MIICTRL_RST;
/* BUGFIX: PCS_MIICTRL is a 32-bit register accessed with readl everywhere
 * else; this was writeb(), which truncated the reset command to one byte.
 */
1531 		writel(val, gp->regs + PCS_MIICTRL);
/* Poll until the self-clearing reset bit drops (bounded wait, elided). */
1534 		while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
1540 			printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
1543 		/* Make sure PCS is disabled while changing advertisement
1546 		val = readl(gp->regs + PCS_CFG);
1547 		val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
1548 		writel(val, gp->regs + PCS_CFG);
1550 		/* Advertise all capabilities except assymetric
1553 		val = readl(gp->regs + PCS_MIIADV);
1554 		val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
1555 			PCS_MIIADV_SP | PCS_MIIADV_AP);
1556 		writel(val, gp->regs + PCS_MIIADV);
1558 		/* Enable and restart auto-negotiation, disable wrapback/loopback,
1559 		 * and re-enable PCS.
1561 		val = readl(gp->regs + PCS_MIICTRL);
1562 		val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
1563 		val &= ~PCS_MIICTRL_WB;
1564 		writel(val, gp->regs + PCS_MIICTRL);
1566 		val = readl(gp->regs + PCS_CFG);
1567 		val |= PCS_CFG_ENABLE;
1568 		writel(val, gp->regs + PCS_CFG);
1570 		/* Make sure serialink loopback is off. The meaning
1571 		 * of this bit is logically inverted based upon whether
1572 		 * you are in Serialink or SERDES mode.
1574 		val = readl(gp->regs + PCS_SCTRL);
1575 		if (gp->phy_type == phy_serialink)
1576 			val &= ~PCS_SCTRL_LOOP;
1578 			val |= PCS_SCTRL_LOOP;
1579 		writel(val, gp->regs + PCS_SCTRL);
/* PCS paths are gigabit by definition. */
1580 		gp->gigabit_capable = 1;
1584 /* Must be invoked under gp->lock. */
/* Program the TX and RX DMA engines: descriptor ring base addresses (from
 * the init block), ring sizes, pause thresholds and interrupt blanking.
 * Caller holds gp->lock.
 */
1585 static void gem_init_dma(struct gem *gp)
1587 	u64 desc_dma = (u64) gp->gblock_dvma;
1590 	val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE);
1591 	writel(val, gp->regs + TXDMA_CFG);
/* TX descriptor ring sits at the start of the init block. */
1593 	writel(desc_dma >> 32, gp->regs + TXDMA_DBHI);
1594 	writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW);
1595 	desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd));
1597 	writel(0, gp->regs + TXDMA_KICK);
1599 	val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
1600 	       ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
1601 	writel(val, gp->regs + RXDMA_CFG);
/* RX ring immediately follows the TX ring in the same block. */
1603 	writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
1604 	writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
1606 	writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
/* Pause thresholds are expressed in 64-byte units. */
1608 	val  = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF);
1609 	val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON);
1610 	writel(val, gp->regs + RXDMA_PTHRESH);
/* Interrupt blanking: longer interval when the bus runs at 66 MHz. */
1612 	if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
1613 		writel(((5 & RXDMA_BLANK_IPKTS) |
1614 			((8 << 12) & RXDMA_BLANK_ITIME)),
1615 		       gp->regs + RXDMA_BLANK);
1617 		writel(((5 & RXDMA_BLANK_IPKTS) |
1618 			((4 << 12) & RXDMA_BLANK_ITIME)),
1619 		       gp->regs + RXDMA_BLANK);
1622 /* Must be invoked under gp->lock. */
/* Compute and program the RX filter (promiscuous flag or the 256-bit hash
 * table in MAC_HASH0..15) from the device's multicast list; returns the
 * MAC_RXCFG bits to apply. Caller holds gp->lock.
 * NOTE(review): the return-type line and parts of the loop bodies are
 * elided in this excerpt.
 */
1624 gem_setup_multicast(struct gem *gp)
/* All-multi (or too many groups to hash): accept every multicast frame. */
1629 	if ((gp->dev->flags & IFF_ALLMULTI) ||
1630 	    (gp->dev->mc_count > 256)) {
1631 	    	for (i=0; i<16; i++)
1632 			writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
1633 		rxcfg |= MAC_RXCFG_HFE;
1634 	} else if (gp->dev->flags & IFF_PROMISC) {
1635 		rxcfg |= MAC_RXCFG_PROM;
1639 		struct dev_mc_list *dmi = gp->dev->mc_list;
1642 		for (i = 0; i < 16; i++)
1645 		for (i = 0; i < gp->dev->mc_count; i++) {
1646 			char *addrs = dmi->dmi_addr;
/* Hash each group address via the Ethernet CRC (top bits select the slot;
 * the crc shift between these lines is elided here). */
1653 			crc = ether_crc_le(6, addrs);
1655 			hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
1657 		for (i=0; i<16; i++)
1658 			writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2));
1659 		rxcfg |= MAC_RXCFG_HFE;
1665 /* Must be invoked under gp->lock. */
/* Program the MAC core: inter-packet gaps, frame size limits, station and
 * pause addresses, RX filter, statistics counters and interrupt masks.
 * RX/TX/XIF configs are left disabled until a link is up.
 * Caller holds gp->lock.
 */
1666 static void gem_init_mac(struct gem *gp)
1668 	unsigned char *e = &gp->dev->dev_addr[0];
1670 	writel(0x1bf0, gp->regs + MAC_SNDPAUSE);
1672 	writel(0x00, gp->regs + MAC_IPG0);
1673 	writel(0x08, gp->regs + MAC_IPG1);
1674 	writel(0x04, gp->regs + MAC_IPG2);
1675 	writel(0x40, gp->regs + MAC_STIME);
1676 	writel(0x40, gp->regs + MAC_MINFSZ);
1678 	/* Ethernet payload + header + FCS + optional VLAN tag. */
1679 	writel(0x20000000 | (gp->dev->mtu + ETH_HLEN + 4 + 4), gp->regs + MAC_MAXFSZ);
1681 	writel(0x07, gp->regs + MAC_PASIZE);
1682 	writel(0x04, gp->regs + MAC_JAMSIZE);
1683 	writel(0x10, gp->regs + MAC_ATTLIM);
1684 	writel(0x8808, gp->regs + MAC_MCTYPE);
/* Seed the backoff RNG from the low bits of the MAC address. */
1686 	writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED);
/* Station address, 16 bits per register, last octets first. */
1688 	writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0);
1689 	writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1);
1690 	writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2);
1692 	writel(0, gp->regs + MAC_ADDR3);
1693 	writel(0, gp->regs + MAC_ADDR4);
1694 	writel(0, gp->regs + MAC_ADDR5);
/* 01:80:c2:00:00:01 — the standard PAUSE multicast address. */
1696 	writel(0x0001, gp->regs + MAC_ADDR6);
1697 	writel(0xc200, gp->regs + MAC_ADDR7);
1698 	writel(0x0180, gp->regs + MAC_ADDR8);
1700 	writel(0, gp->regs + MAC_AFILT0);
1701 	writel(0, gp->regs + MAC_AFILT1);
1702 	writel(0, gp->regs + MAC_AFILT2);
1703 	writel(0, gp->regs + MAC_AF21MSK);
1704 	writel(0, gp->regs + MAC_AF0MSK);
1706 	gp->mac_rx_cfg = gem_setup_multicast(gp);
/* Zero the hardware statistics counters. */
1708 	writel(0, gp->regs + MAC_NCOLL);
1709 	writel(0, gp->regs + MAC_FASUCC);
1710 	writel(0, gp->regs + MAC_ECOLL);
1711 	writel(0, gp->regs + MAC_LCOLL);
1712 	writel(0, gp->regs + MAC_DTIMER);
1713 	writel(0, gp->regs + MAC_PATMPS);
1714 	writel(0, gp->regs + MAC_RFCTR);
1715 	writel(0, gp->regs + MAC_LERR);
1716 	writel(0, gp->regs + MAC_AERR);
1717 	writel(0, gp->regs + MAC_FCSERR);
1718 	writel(0, gp->regs + MAC_RXCVERR);
1720 	/* Clear RX/TX/MAC/XIF config, we will set these up and enable
1721 	 * them once a link is established.
1723 	writel(0, gp->regs + MAC_TXCFG);
1724 	writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG);
1725 	writel(0, gp->regs + MAC_MCCFG);
1726 	writel(0, gp->regs + MAC_XIFCFG);
1728 	/* Setup MAC interrupts. We want to get all of the interesting
1729 	 * counter expiration events, but we do not want to hear about
1730 	 * normal rx/tx as the DMA engine tells us that.
1732 	writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK);
1733 	writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK);
1735 	/* Don't enable even the PAUSE interrupts for now, we
1736 	 * make no use of those events other than to record them.
1738 	writel(0xffffffff, gp->regs + MAC_MCMASK);
1741 /* Must be invoked under gp->lock. */
/* Derive the RX-FIFO pause on/off watermarks from the FIFO size, then set
 * up the global burst/DMA-limit configuration. Caller holds gp->lock.
 */
1742 static void gem_init_pause_thresholds(struct gem *gp)
1744 	/* Calculate pause thresholds.  Setting the OFF threshold to the
1745 	 * full RX fifo size effectively disables PAUSE generation which
1746 	 * is what we do for 10/100 only GEMs which have FIFOs too small
1747 	 * to make real gains from PAUSE.
1749 	if (gp->rx_fifo_sz <= (2 * 1024)) {
1750 		gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz;
/* Larger FIFO: pause when less than two max frames of space remain. */
1752 		int max_frame = (gp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
1753 		int off = (gp->rx_fifo_sz - (max_frame * 2));
1754 		int on = off - max_frame;
1756 		gp->rx_pause_off = off;
1757 		gp->rx_pause_on = on;
/* Infinite burst is usable only on buses without the 64-bit/alpha quirks. */
1764 #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
1765 	cfg |= GREG_CFG_IBURST;
1767 	cfg |= ((31 << 1) & GREG_CFG_TXDMALIM);
1768 	cfg |= ((31 << 6) & GREG_CFG_RXDMALIM);
1769 	writel(cfg, gp->regs + GREG_CFG);
/* Probe the board flavour (Apple GMAC, Sun GEM, Sun RIO GEM), decide the
 * PHY interface type (MDIO0/MDIO1/serialink/serdes), find the MII PHY
 * address, and sanity-check the FIFO sizes. Returns nonzero on a board we
 * cannot drive. NOTE(review): excerpt is elided; error returns sit on
 * lines not shown here.
 */
1773 static int gem_check_invariants(struct gem *gp)
1775 	struct pci_dev *pdev = gp->pdev;
1778 	/* On Apple's sungem, we can't rely on registers as the chip
1779 	 * was been powered down by the firmware. The PHY is looked
1782 	if (pdev->vendor == PCI_VENDOR_ID_APPLE) {
1783 		gp->phy_type = phy_mii_mdio0;
/* FSZ registers report the FIFO depth in 64-byte units. */
1784 		gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
1785 		gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
1790 	mif_cfg = readl(gp->regs + MIF_CFG);
1792 	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
1793 	    pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) {
1794 		/* One of the MII PHYs _must_ be present
1795 		 * as this chip has no gigabit PHY.
1797 		if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
1798 			printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n",
1804 	/* Determine initial PHY interface type guess.  MDIO1 is the
1805 	 * external PHY and thus takes precedence over MDIO0.
1808 	if (mif_cfg & MIF_CFG_MDI1) {
1809 		gp->phy_type = phy_mii_mdio1;
1810 		mif_cfg |= MIF_CFG_PSELECT;
1811 		writel(mif_cfg, gp->regs + MIF_CFG);
1812 	} else if (mif_cfg & MIF_CFG_MDI0) {
1813 		gp->phy_type = phy_mii_mdio0;
1814 		mif_cfg &= ~MIF_CFG_PSELECT;
1815 		writel(mif_cfg, gp->regs + MIF_CFG);
1817 		gp->phy_type = phy_serialink;
1819 	if (gp->phy_type == phy_mii_mdio1 ||
1820 	    gp->phy_type == phy_mii_mdio0) {
/* Scan all 32 MDIO addresses; 0xffff means nothing answered there. */
1823 		for (i = 0; i < 32; i++) {
1824 			gp->mii_phy_addr = i;
1825 			if (phy_read(gp, MII_BMCR) != 0xffff)
1829 			if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
1830 				printk(KERN_ERR PFX "RIO MII phy will not respond.\n");
/* Full GEM without an MII PHY: fall back to the on-chip SERDES. */
1833 			gp->phy_type = phy_serdes;
1837 	/* Fetch the FIFO configurations now too. */
1838 	gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64;
1839 	gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64;
1841 	if (pdev->vendor == PCI_VENDOR_ID_SUN) {
1842 		if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
1843 			if (gp->tx_fifo_sz != (9 * 1024) ||
1844 			    gp->rx_fifo_sz != (20 * 1024)) {
1845 				printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
1846 				       gp->tx_fifo_sz, gp->rx_fifo_sz);
1851 			if (gp->tx_fifo_sz != (2 * 1024) ||
1852 			    gp->rx_fifo_sz != (2 * 1024)) {
1853 				printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
1854 				       gp->tx_fifo_sz, gp->rx_fifo_sz);
1857 			gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT;
1864 /* Must be invoked under gp->lock. */
/* Full chip bring-up: pause thresholds, (elided) ring/DMA/MAC init, then
 * autonegotiation restart; reflects any already-up link into the carrier
 * state. Caller holds gp->lock. restart_link — presumably gates whether
 * aneg state is reset; the lines using it are elided here, TODO confirm.
 */
1865 static void gem_init_hw(struct gem *gp, int restart_link)
1867 	/* On Apple's gmac, I initialize the PHY only after
1868 	 * setting up the chip. It appears the gigabit PHYs
1869 	 * don't quite like beeing talked to on the GII when
1870 	 * the chip is not running, I suspect it might not
1871 	 * be clocked at that point. --BenH
1875 	gem_init_pause_thresholds(gp);
1880 		/* Default aneg parameters */
1881 		gp->timer_ticks = 0;
1882 		gp->lstate = link_down;
1883 		netif_carrier_off(gp->dev);
1885 		/* Can I advertise gigabit here ? I'd need BCM PHY docs... */
1886 		gem_begin_auto_negotiation(gp, NULL);
/* Link may already be established (e.g. forced modes). */
1888 	if (gp->lstate == link_up) {
1889 		netif_carrier_on(gp->dev);
1890 		gem_set_link_modes(gp);
1895 #ifdef CONFIG_ALL_PPC
1896 /* Enable the chip's clock and make sure it's config space is
1897 * setup properly. There appear to be no need to restore the
/* Apple GMAC only: turn the cell clock on via pmac feature calls, wait for
 * the chip to stabilize, restore PCI command/latency/cacheline config and
 * put the MIF/PCS/XIF into a sane MDIO0 state. Sleeps — must not be
 * called under a spinlock.
 */
1900 static void gem_apple_powerup(struct gem *gp)
1905 	pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1);
/* Give the chip ~21ms to come out of reset. */
1907 	current->state = TASK_UNINTERRUPTIBLE;
1908 	schedule_timeout((21 * HZ) / 1000);
/* Firmware may have cleared these while the cell was unclocked. */
1910 	pci_read_config_word(gp->pdev, PCI_COMMAND, &cmd);
1911 	cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
1912 	pci_write_config_word(gp->pdev, PCI_COMMAND, cmd);
1913 	pci_write_config_byte(gp->pdev, PCI_LATENCY_TIMER, 6);
1914 	pci_write_config_byte(gp->pdev, PCI_CACHE_LINE_SIZE, 8);
/* Select the MDIO0 (internal) PHY path and re-enable output drivers. */
1918 	mif_cfg = readl(gp->regs + MIF_CFG);
1919 	mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1);
1920 	mif_cfg |= MIF_CFG_MDI0;
1921 	writel(mif_cfg, gp->regs + MIF_CFG);
1922 	writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE);
1923 	writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG);
1928 /* Turn off the chip's clock */
/* Apple GMAC only: gate the cell clock off via the pmac feature call. */
1929 static void gem_apple_powerdown(struct gem *gp)
1931 	pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0);
1934 #endif /* CONFIG_ALL_PPC */
1936 /* Must be invoked under gp->lock. */
/* Quiesce the PHY/MAC for power-down. With wake-on-lan the MAC is left
 * configured to receive magic packets; otherwise the MAC is reset, the
 * PHY is suspended and the MDIO pins are parked in bit-bang mode (per
 * Apple's guidance). Caller holds gp->lock.
 */
1937 static void gem_stop_phy(struct gem *gp)
1941 	/* Make sure we aren't polling PHY status change. We
1942 	 * don't currently use that feature though
1944 	mifcfg = readl(gp->regs + MIF_CFG);
1945 	mifcfg &= ~MIF_CFG_POLL;
1946 	writel(mifcfg, gp->regs + MIF_CFG);
1948 	if (gp->wake_on_lan) {
1949 		/* Setup wake-on-lan */
/* Non-WOL path: stop MAC and both DMA engines outright. */
1951 		writel(0, gp->regs + MAC_RXCFG);
1952 	writel(0, gp->regs + MAC_TXCFG);
1953 	writel(0, gp->regs + MAC_XIFCFG);
1954 	writel(0, gp->regs + TXDMA_CFG);
1955 	writel(0, gp->regs + RXDMA_CFG);
1957 	if (!gp->wake_on_lan) {
1959 		writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST);
1960 		writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST);
/* Let the PHY driver do its own low-power sequencing if it has one. */
1963 	if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend)
1964 		gp->phy_mii.def->ops->suspend(&gp->phy_mii, 0 /* wake on lan options */);
1966 	if (!gp->wake_on_lan) {
1967 		/* According to Apple, we must set the MDIO pins to this begnign
1968 		 * state or we may 1) eat more current, 2) damage some PHYs
1970 		writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG);
1971 		writel(0, gp->regs + MIF_BBCLK);
1972 		writel(0, gp->regs + MIF_BBDATA);
1973 		writel(0, gp->regs + MIF_BBOENAB);
1974 		writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG);
/* Read back to flush the posted write before power goes away. */
1975 		(void) readl(gp->regs + MAC_XIFCFG);
1979 /* Shut down the chip, must be called with pm_sem held. */
/* Shut the chip down completely: stop timers and the reset task, stop the
 * PHY/chip under gp->lock, and on Apple hardware gate the cell clock off
 * (outside the lock, since powerdown may sleep). Caller holds pm_sem.
 */
1980 static void gem_shutdown(struct gem *gp)
1982 	/* Make us not-running to avoid timers respawning */
1985 	/* Stop the link timer */
1986 	del_timer_sync(&gp->link_timer);
1988 	/* Stop the reset task */
/* Busy-wait until any queued gem_reset_task has drained (body elided). */
1989 	while (gp->reset_task_pending)
1992 	/* Actually stop the chip */
1993 	spin_lock_irq(&gp->lock);
1994 	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
/* Drop the lock first: gem_apple_powerdown goes through pmac calls. */
1997 		spin_unlock_irq(&gp->lock);
1999 #ifdef CONFIG_ALL_PPC
2000 		/* Power down the chip */
2001 		gem_apple_powerdown(gp);
2002 #endif /* CONFIG_ALL_PPC */
2006 		spin_unlock_irq(&gp->lock);
/* Deferred power-management work, scheduled by gem_pm_timer: shuts the
 * hardware down when the interface has stayed closed. Bails out if the
 * pm_sem is contended (open/suspend in progress) or state changed.
 */
2010 static void gem_pm_task(void *data)
2012 	struct gem *gp = (struct gem *) data;
2014 	/* We assume if we can't lock the pm_sem, then open() was
2015 	 * called again (or suspend()), and we can safely ignore
2018 	if (down_trylock(&gp->pm_sem))
2021 	/* Driver was re-opened or already shut down */
2022 	if (gp->opened || !gp->hw_running) {
/* PM timer callback: runs in timer (softirq) context, so it only queues
 * gem_pm_task to do the real work in process context.
 */
2032 static void gem_pm_timer(unsigned long data)
2034 	struct gem *gp = (struct gem *) data;
2036 	schedule_task(&gp->pm_task);
/* net_device open(): cancel the pending PM shutdown, power up / reset the
 * chip if it was off, request the IRQ, then allocate rings and initialize
 * the hardware. On IRQ failure the PM timer is re-armed so the chip gets
 * powered back down. NOTE(review): excerpt is elided; pm_sem handling and
 * return paths sit on lines not shown.
 */
2039 static int gem_open(struct net_device *dev)
2041 	struct gem *gp = dev->priv;
2046 	hw_was_up = gp->hw_running;
2048 	/* Stop the PM timer/task */
2049 	del_timer(&gp->pm_timer);
2050 	flush_scheduled_tasks();
2052 	/* The power-management semaphore protects the hw_running
2053 	 * etc. state so it is safe to do this bit without gp->lock
2055 	if (!gp->hw_running) {
2056 #ifdef CONFIG_ALL_PPC
2057 		/* First, we need to bring up the chip */
2058 		if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
2059 			gem_apple_powerup(gp);
/* Re-probe: register state is lost while the Apple cell is unclocked. */
2060 			gem_check_invariants(gp);
2062 #endif /* CONFIG_ALL_PPC */
2064 		/* Reset the chip */
2065 		spin_lock_irq(&gp->lock);
2067 		spin_unlock_irq(&gp->lock);
2072 	spin_lock_irq(&gp->lock);
2074 	/* We can now request the interrupt as we know it's masked
2077 	if (request_irq(gp->pdev->irq, gem_interrupt,
2078 			SA_SHIRQ, dev->name, (void *)dev)) {
2079 		spin_unlock_irq(&gp->lock);
2081 		printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
2083 #ifdef CONFIG_ALL_PPC
/* Undo the powerup we just did if the chip was off before this open. */
2084 		if (!hw_was_up && gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
2085 			gem_apple_powerdown(gp);
2086 #endif /* CONFIG_ALL_PPC */
2087 		/* Fire the PM timer that will shut us down in about 10 seconds */
2088 		gp->pm_timer.expires = jiffies + 10*HZ;
2089 		add_timer(&gp->pm_timer);
2095 	/* Allocate & setup ring buffers */
2098 	/* Init & setup chip hardware */
2099 	gem_init_hw(gp, !hw_was_up);
2103 	spin_unlock_irq(&gp->lock);
/* net_device stop(): mask interrupts, stop the queue, tear down the rings
 * and free the IRQ, then arm the PM timer so the chip is powered down
 * ~10s later if the interface stays closed.
 */
2110 static int gem_close(struct net_device *dev)
2112 	struct gem *gp = dev->priv;
2114 	/* Make sure we don't get distracted by suspend/resume */
2117 	/* Stop traffic, mark us closed */
2118 	spin_lock_irq(&gp->lock);
/* Mask every chip interrupt source. */
2121 	writel(0xffffffff, gp->regs + GREG_IMASK);
2122 	netif_stop_queue(dev);
2127 	/* Get rid of rings */
2128 	gem_clean_rings(gp);
2130 	/* Bye, the pm timer will finish the job */
2131 	free_irq(gp->pdev->irq, (void *) dev);
2133 	spin_unlock_irq(&gp->lock);
2135 	/* Fire the PM timer that will shut us down in about 10 seconds */
2136 	gp->pm_timer.expires = jiffies + 10*HZ;
2137 	add_timer(&gp->pm_timer);
/* PCI suspend hook: detach the netdev, mask interrupts and free the rings
 * if the interface is open, then stop the PM timer/task and (elided)
 * shut the chip down. `state` is the target PCI power state.
 */
2145 static int gem_suspend(struct pci_dev *pdev, u32 state)
2147 	struct net_device *dev = pci_get_drvdata(pdev);
2148 	struct gem *gp = dev->priv;
2150 	/* We hold the PM semaphore during entire driver
2155 	printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
2156 	       dev->name, gp->wake_on_lan ? "enabled" : "disabled");
2158 	/* If the driver is opened, we stop the DMA */
2160 		spin_lock_irq(&gp->lock);
2162 		/* Stop traffic, mark us closed */
2163 		netif_device_detach(dev);
/* Mask every chip interrupt source. */
2165 		writel(0xffffffff, gp->regs + GREG_IMASK);
2170 		/* Get rid of ring buffers */
2171 		gem_clean_rings(gp);
2173 		spin_unlock_irq(&gp->lock);
2175 		if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
2176 			disable_irq(gp->pdev->irq);
2179 	if (gp->hw_running) {
2180 		/* Kill PM timer if any */
2181 		del_timer_sync(&gp->pm_timer);
2182 		flush_scheduled_tasks();
/* PCI resume hook: power the chip back up (Apple), re-run the invariants
 * probe, re-init the hardware under gp->lock (elided), then reattach the
 * netdev and re-enable the IRQ.
 */
2190 static int gem_resume(struct pci_dev *pdev)
2192 	struct net_device *dev = pci_get_drvdata(pdev);
2193 	struct gem *gp = dev->priv;
2195 	printk(KERN_INFO "%s: resuming\n", dev->name);
2198 #ifdef CONFIG_ALL_PPC
2199 		/* First, we need to bring up the chip */
2200 		if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
2201 			gem_apple_powerup(gp);
/* Register state was lost while powered down — probe again. */
2202 			gem_check_invariants(gp);
2204 #endif /* CONFIG_ALL_PPC */
2205 		spin_lock_irq(&gp->lock);
2212 		spin_unlock_irq(&gp->lock);
2214 		netif_device_attach(dev);
2215 		if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE)
2216 			enable_irq(gp->pdev->irq);
2222 #endif /* CONFIG_PM */
/* get_stats hook: fold the chip's clear-on-write error counters into the
 * software net_device_stats and return them. Reads only happen while the
 * hardware is running.
 */
2224 static struct net_device_stats *gem_get_stats(struct net_device *dev)
2226 	struct gem *gp = dev->priv;
2227 	struct net_device_stats *stats = &gp->net_stats;
2229 	spin_lock_irq(&gp->lock);
2231 	if (gp->hw_running) {
/* Each counter is accumulated then reset to zero in hardware. */
2232 		stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
2233 		writel(0, gp->regs + MAC_FCSERR);
2235 		stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
2236 		writel(0, gp->regs + MAC_AERR);
2238 		stats->rx_length_errors += readl(gp->regs + MAC_LERR);
2239 		writel(0, gp->regs + MAC_LERR);
2241 		stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
2242 		stats->collisions +=
2243 			(readl(gp->regs + MAC_ECOLL) +
2244 			 readl(gp->regs + MAC_LCOLL));
2245 		writel(0, gp->regs + MAC_ECOLL);
2246 		writel(0, gp->regs + MAC_LCOLL);
2249 	spin_unlock_irq(&gp->lock);
2251 	return &gp->net_stats;
/* set_multicast_list hook: recompute the RX filter, disable the receiver,
 * wait for MAC_RXCFG_ENAB to drop (per chip requirements), then write the
 * new config. No-op while the hardware is powered down.
 */
2254 static void gem_set_multicast(struct net_device *dev)
2256 	struct gem *gp = dev->priv;
2257 	u32 rxcfg, rxcfg_new;
2260 	if (!gp->hw_running)
2263 	spin_lock_irq(&gp->lock);
2265 	netif_stop_queue(dev);
2267 	rxcfg = readl(gp->regs + MAC_RXCFG);
2268 	gp->mac_rx_cfg = rxcfg_new = gem_setup_multicast(gp);
/* Receiver must be quiesced before the filter bits may change. */
2270 	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
2271 	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
2277 	rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE);
2280 	writel(rxcfg, gp->regs + MAC_RXCFG);
2282 	netif_wake_queue(dev);
2284 	spin_unlock_irq(&gp->lock);
2287 /* Eventually add support for changing the advertisement
/* Handle SIOCETHTOOL sub-commands: driver info, get/set link settings,
 * autoneg restart, link status, message level and register dump.
 * `ep_user` is a userspace pointer; all transfers go through
 * copy_{from,to}_user. NOTE(review): excerpt is elided (embedded
 * numbering shows gaps) — switch header, returns and some braces are
 * on lines not shown.
 */
2290 static int gem_ethtool_ioctl(struct net_device *dev, void *ep_user)
2292 	struct gem *gp = dev->priv;
2293 	struct ethtool_cmd ecmd;
2295 	if (copy_from_user(&ecmd, ep_user, sizeof(ecmd)))
2299 	case ETHTOOL_GDRVINFO: {
2300 		struct ethtool_drvinfo info = { cmd: ETHTOOL_GDRVINFO };
2302 		strncpy(info.driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
2303 		strncpy(info.version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
2304 		info.fw_version[0] = '\0';
2305 		strncpy(info.bus_info, gp->pdev->slot_name, ETHTOOL_BUSINFO_LEN);
2306 		info.regdump_len = 0; /*SUNGEM_NREGS;*/
2308 		if (copy_to_user(ep_user, &info, sizeof(info)))
2315 		if (gp->phy_type == phy_mii_mdio0 ||
2316 	     	    gp->phy_type == phy_mii_mdio1) {
2317 			if (gp->phy_mii.def)
2318 				ecmd.supported = gp->phy_mii.def->features;
2320 				ecmd.supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
2322 			/* XXX hardcoded stuff for now */
2323 			ecmd.port = PORT_MII;
2324 			ecmd.transceiver = XCVR_EXTERNAL;
2325 			ecmd.phy_address = 0; /* XXX fixed PHYAD */
2327 			/* Record PHY settings if HW is on. */
2328 			spin_lock_irq(&gp->lock);
2329 			ecmd.autoneg = gp->want_autoneg;
2330 			ecmd.speed = gp->phy_mii.speed;
2331 			ecmd.duplex = gp->phy_mii.duplex;
2332 			spin_unlock_irq(&gp->lock);
2333 		} else { // XXX PCS ?
2335 				(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
2336 				 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2339 		if (copy_to_user(ep_user, &ecmd, sizeof(ecmd)))
2344 		if (!capable(CAP_NET_ADMIN))
2347 		/* Verify the settings we care about. */
2348 		if (ecmd.autoneg != AUTONEG_ENABLE &&
2349 		    ecmd.autoneg != AUTONEG_DISABLE)
2352 		if (ecmd.autoneg == AUTONEG_ENABLE &&
2353 		    ecmd.advertising == 0)
2356 		if (ecmd.autoneg == AUTONEG_DISABLE &&
2357 		    ((ecmd.speed != SPEED_1000 &&
2358 		      ecmd.speed != SPEED_100 &&
2359 		      ecmd.speed != SPEED_10) ||
2360 		     (ecmd.duplex != DUPLEX_HALF &&
2361 		      ecmd.duplex != DUPLEX_FULL)))
2364 		if (ecmd.autoneg == AUTONEG_DISABLE &&
2365 		    ecmd.speed == SPEED_1000 &&
2366 		    gp->gigabit_capable == 0)
2369 		/* Apply settings and restart link process. */
2370 		spin_lock_irq(&gp->lock);
2371 		gem_begin_auto_negotiation(gp, &ecmd);
2372 		spin_unlock_irq(&gp->lock);
2376 	case ETHTOOL_NWAY_RST:
2377 		if (!gp->want_autoneg)
2380 		/* Restart link process. */
2381 		spin_lock_irq(&gp->lock);
2382 		gem_begin_auto_negotiation(gp, NULL);
2383 		spin_unlock_irq(&gp->lock);
2391 	/* get link status */
2392 	case ETHTOOL_GLINK: {
2393 		struct ethtool_value edata = { cmd: ETHTOOL_GLINK };
2395 		edata.data = (gp->lstate == link_up);
2396 		if (copy_to_user(ep_user, &edata, sizeof(edata)))
2401 	/* get message-level */
2402 	case ETHTOOL_GMSGLVL: {
2403 		struct ethtool_value edata = { cmd: ETHTOOL_GMSGLVL };
2405 		edata.data = gp->msg_enable;
2406 		if (copy_to_user(ep_user, &edata, sizeof(edata)))
2411 	/* set message-level */
2412 	case ETHTOOL_SMSGLVL: {
2413 		struct ethtool_value edata;
2415 		if (copy_from_user(&edata, ep_user, sizeof(edata)))
2417 		gp->msg_enable = edata.data;
2422 	case ETHTOOL_GREGS: {
2423 		struct ethtool_regs regs;
/* BUGFIX: these two lines contained mojibake "®s" (a mangled "&regs"),
 * which is not valid C.
 */
2427 		if (copy_from_user(&regs, useraddr, sizeof(regs)))
2430 		if (regs.len > SUNGEM_NREGS) {
2431 			regs.len = SUNGEM_NREGS;
2434 		if (copy_to_user(useraddr, &regs, sizeof(regs)))
2437 		if (!gp->hw_running)
2439 		useraddr += offsetof(struct ethtool_regs, data);
2441 		/* Use kmalloc to avoid bloating the stack */
2442 		regbuf = kmalloc(4 * SUNGEM_NREGS, GFP_KERNEL);
/* BUGFIX: locked "&np->lock" — there is no `np` in this function (a
 * copy-paste from another driver); the device lock is gp->lock, as used
 * by every other branch above.
 */
2445 		spin_lock_irq(&gp->lock);
2446 		gem_get_regs(gp, regbuf);
2447 		spin_unlock_irq(&gp->lock);
2449 		if (copy_to_user(useraddr, regbuf, regs.len*sizeof(u32)))
/* do_ioctl hook: dispatches SIOCETHTOOL to gem_ethtool_ioctl and handles
 * the standard MII ioctls (get PHY address, read/write PHY registers).
 * Returns -EOPNOTSUPP for anything else.
 */
2460 static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2462 	struct gem *gp = dev->priv;
2463 	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
2464 	int rc = -EOPNOTSUPP;
2466 	/* Hold the PM semaphore while doing ioctl's or we may collide
2467 	 * with open/close and power management and oops.
2473 		rc = gem_ethtool_ioctl(dev, ifr->ifr_data);
2476 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
2477 		data->phy_id = gp->mii_phy_addr;
2478 		/* Fallthrough... */
2480 	case SIOCGMIIREG:		/* Read MII PHY register. */
/* PHY access needs a clocked chip. */
2481 		if (!gp->hw_running)
2484 			data->val_out = __phy_read(gp, data->phy_id & 0x1f, data->reg_num & 0x1f);
2489 	case SIOCSMIIREG:		/* Write MII PHY register. */
2490 		if (!capable(CAP_NET_ADMIN))
2492 		else if (!gp->hw_running)
2495 			__phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
2506 #if (!defined(__sparc__) && !defined(CONFIG_ALL_PPC))
2507 /* Fetch MAC address from vital product data of PCI ROM. */
/* Scan a mapped expansion-ROM image for the PCI VPD "NA" (network address)
 * descriptor and copy the 6-byte MAC that follows it into dev_addr.
 * Accesses the ROM through readb() since rom_base is ioremap'ed.
 */
2508 static void find_eth_addr_in_vpd(void *rom_base, int len, unsigned char *dev_addr)
2512 	for (this_offset = 0x20; this_offset < len; this_offset++) {
2513 		void *p = rom_base + this_offset;
/* Match the VPD-R tag bytes: 0x90 0x00 0x09 then "NA" + length 6. */
2516 		if (readb(p + 0) != 0x90 ||
2517 		    readb(p + 1) != 0x00 ||
2518 		    readb(p + 2) != 0x09 ||
2519 		    readb(p + 3) != 0x4e ||
2520 		    readb(p + 4) != 0x41 ||
2521 		    readb(p + 5) != 0x06)
2527 		for (i = 0; i < 6; i++)
2528 			dev_addr[i] = readb(p + i);
/* Obtain a MAC address on systems without OpenBoot/OF: temporarily enable
 * and map the card's expansion ROM, look for the address in its VPD, and
 * fall back to a Sun-prefixed random address if that fails.
 */
2533 static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
2538 	if (pdev->resource[PCI_ROM_RESOURCE].parent == NULL) {
2539 		if (pci_assign_resource(pdev, PCI_ROM_RESOURCE) < 0)
/* Enable ROM decoding just for the duration of the scan. */
2543 	pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_reg_orig);
2544 	pci_write_config_dword(pdev, pdev->rom_base_reg,
2545 			       rom_reg_orig | PCI_ROM_ADDRESS_ENABLE);
2547 	p = ioremap(pci_resource_start(pdev, PCI_ROM_RESOURCE), (64 * 1024));
/* 0x55 0xaa is the standard expansion-ROM signature. */
2548 	if (p != NULL && readb(p) == 0x55 && readb(p + 1) == 0xaa)
2549 		find_eth_addr_in_vpd(p, (64 * 1024), dev_addr);
/* Restore the ROM BAR exactly as we found it. */
2554 	pci_write_config_dword(pdev, pdev->rom_base_reg, rom_reg_orig);
2558 	/* Sun MAC prefix then 3 random bytes. */
2562 	get_random_bytes(dev_addr + 3, 3);
2565 #endif /* not Sparc and not PPC */
/* Fill dev->dev_addr with the hardware MAC address: from the PROM
 * "local-mac-address" property (sparc), the IDPROM fallback, the OF device
 * tree (PPC), or the ROM VPD / random fallback elsewhere. Returns nonzero
 * on failure. NOTE(review): excerpt is elided (embedded numbering shows
 * gaps) — declarations and return paths sit on lines not shown.
 */
2567 static int __devinit gem_get_device_address(struct gem *gp)
2569 #if defined(__sparc__) || defined(CONFIG_ALL_PPC)
2570 	struct net_device *dev = gp->dev;
2573 #if defined(__sparc__)
2574 	struct pci_dev *pdev = gp->pdev;
2575 	struct pcidev_cookie *pcp = pdev->sysdata;
2579 		node = pcp->prom_node;
2580 		if (prom_getproplen(node, "local-mac-address") == 6)
2581 			prom_getproperty(node, "local-mac-address",
2587 		memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
2588 #elif defined(CONFIG_ALL_PPC)
2589 	unsigned char *addr;
2591 	addr = get_property(gp->of_node, "local-mac-address", NULL);
2594 		printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
/* BUGFIX: copied MAX_ADDR_LEN (32) bytes from a 6-byte OF property,
 * reading past the end of the property buffer. An Ethernet MAC is 6
 * bytes, matching the sparc path above.
 */
2597 	memcpy(dev->dev_addr, addr, 6);
2599 	get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
/* PCI probe: enable the device, choose a DMA mask (64-bit only on real
 * Sun GEMs), map BAR 0, allocate the netdev and private state, probe the
 * PHY, allocate the DMA init block, fetch the MAC address, register the
 * netdev and wire up the net_device ops. The chip is powered down again
 * ~10s later by the PM timer unless the interface is opened.
 * NOTE(review): excerpt is elided; several error paths and assignments
 * (e.g. gp = dev->priv) sit on lines not shown.
 */
2604 static int __devinit gem_init_one(struct pci_dev *pdev,
2605 				  const struct pci_device_id *ent)
2607 	static int gem_version_printed = 0;
2608 	unsigned long gemreg_base, gemreg_len;
2609 	struct net_device *dev;
2611 	int i, err, pci_using_dac;
2613 	if (gem_version_printed++ == 0)
2614 		printk(KERN_INFO "%s", version);
2616 	/* Apple gmac note: during probe, the chip is powered up by
2617 	 * the arch code to allow the code below to work (and to let
2618 	 * the chip be probed on the config space. It won't stay powered
2619 	 * up until the interface is brought up however, so we can't rely
2620 	 * on register configuration done at this point.
2622 	err = pci_enable_device(pdev);
2624 		printk(KERN_ERR PFX "Cannot enable MMIO operation, "
2628 	pci_set_master(pdev);
2630 	/* Configure DMA attributes. */
2632 	/* All of the GEM documentation states that 64-bit DMA addressing
2633 	 * is fully supported and should work just fine. However the
2634 	 * front end for RIO based GEMs is different and only supports
2635 	 * 32-bit addressing.
2637 	 * For now we assume the various PPC GEMs are 32-bit only as well.
2639 	if (pdev->vendor == PCI_VENDOR_ID_SUN &&
2640 	    pdev->device == PCI_DEVICE_ID_SUN_GEM &&
2641 	    !pci_set_dma_mask(pdev, (u64) 0xffffffffffffffff)) {
2644 		err = pci_set_dma_mask(pdev, (u64) 0xffffffff);
2646 			printk(KERN_ERR PFX "No usable DMA configuration, "
2653 	gemreg_base = pci_resource_start(pdev, 0);
2654 	gemreg_len = pci_resource_len(pdev, 0);
/* Registers must be memory-mapped; an I/O BAR means a broken config. */
2656 	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
2657 		printk(KERN_ERR PFX "Cannot find proper PCI device "
2658 		       "base address, aborting.\n");
2662 	dev = alloc_etherdev(sizeof(*gp));
2664 		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
2667 	SET_MODULE_OWNER(dev);
2671 	if (pci_request_regions(pdev, dev->name)) {
2672 		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
2674 		goto err_out_free_netdev;
2678 	dev->base_addr = (long) pdev;
2681 	gp->msg_enable = (gem_debug < 0 ? DEFAULT_MSG : gem_debug);
2683 	spin_lock_init(&gp->lock);
2684 	init_MUTEX(&gp->pm_sem);
2686 	init_timer(&gp->link_timer);
2687 	gp->link_timer.function = gem_link_timer;
2688 	gp->link_timer.data = (unsigned long) gp;
2690 	init_timer(&gp->pm_timer);
2691 	gp->pm_timer.function = gem_pm_timer;
2692 	gp->pm_timer.data = (unsigned long) gp;
2694 	INIT_TQUEUE(&gp->pm_task, gem_pm_task, gp);
2695 	INIT_TQUEUE(&gp->reset_task, gem_reset_task, gp);
2697 	gp->lstate = link_down;
2698 	gp->timer_ticks = 0;
2699 	netif_carrier_off(dev);
2701 	gp->regs = (unsigned long) ioremap(gemreg_base, gemreg_len);
2702 	if (gp->regs == 0UL) {
2703 		printk(KERN_ERR PFX "Cannot map device registers, "
2705 		goto err_out_free_res;
2708 	/* On Apple, we power the chip up now in order for check
2709 	 * invariants to work, but also because the firmware might
2710 	 * not have properly shut down the PHY.
2712 #ifdef CONFIG_ALL_PPC
2713 	gp->of_node = pci_device_to_OF_node(pdev);
2714 	if (pdev->vendor == PCI_VENDOR_ID_APPLE)
2715 		gem_apple_powerup(gp);
2717 	spin_lock_irq(&gp->lock);
2719 	spin_unlock_irq(&gp->lock);
2721 	/* Fill up the mii_phy structure (even if we won't use it) */
2722 	gp->phy_mii.dev = dev;
2723 	gp->phy_mii.mdio_read = _phy_read;
2724 	gp->phy_mii.mdio_write = _phy_write;
2726 	/* Default link parameters */
2727 	if (forced_speed != -1 &&
2728 	    forced_speed != SPEED_10 &&
2729 	    forced_speed != SPEED_100 &&
2730 	    forced_speed != SPEED_1000) {
2732 		printk(KERN_WARNING "forced_speed argument invalid, reverting to autoneg\n");
/* forced_speed < SPEED_10 (e.g. the -1 default) means autonegotiate. */
2734 	if (forced_speed < SPEED_10)
2735 		gp->want_autoneg = 1;
2737 		gp->want_autoneg = 0;
2738 		gp->phy_mii.speed = forced_speed;
2739 		if (forced_duplex > 0)
2740 			gp->phy_mii.duplex = DUPLEX_FULL;
2742 			gp->phy_mii.duplex = DUPLEX_HALF;
2745 	if (gem_check_invariants(gp))
2746 		goto err_out_iounmap;
2748 	/* It is guarenteed that the returned buffer will be at least
2749 	 * PAGE_SIZE aligned.
2751 	gp->init_block = (struct gem_init_block *)
2752 		pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
2754 	if (!gp->init_block) {
2755 		printk(KERN_ERR PFX "Cannot allocate init block, "
2757 		goto err_out_iounmap;
2760 	if (gem_get_device_address(gp))
2761 		goto err_out_free_consistent;
2763 	if (register_netdev(dev)) {
2764 		printk(KERN_ERR PFX "Cannot register net device, "
2766 		goto err_out_free_consistent;
2769 	printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet ",
2771 	for (i = 0; i < 6; i++)
2772 		printk("%2.2x%c", dev->dev_addr[i],
2773 		       i == 5 ? ' ' : ':');
2776 	/* Detect & init PHY, start autoneg */
2777 	spin_lock_irq(&gp->lock);
2780 	gem_begin_auto_negotiation(gp, NULL);
2781 	spin_unlock_irq(&gp->lock);
2783 	if (gp->phy_type == phy_mii_mdio0 ||
2784      	    gp->phy_type == phy_mii_mdio1)
2785 		printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
2786 		       gp->phy_mii.def ? gp->phy_mii.def->name : "no");
2788 	pci_set_drvdata(pdev, dev);
2790 	dev->open = gem_open;
2791 	dev->stop = gem_close;
2792 	dev->hard_start_xmit = gem_start_xmit;
2793 	dev->get_stats = gem_get_stats;
2794 	dev->set_multicast_list = gem_set_multicast;
2795 	dev->do_ioctl = gem_ioctl;
2796 	dev->tx_timeout = gem_tx_timeout;
2797 	dev->watchdog_timeo = 5 * HZ;
2798 	dev->change_mtu = gem_change_mtu;
2799 	dev->irq = pdev->irq;
2802 	/* GEM can do it all... */
2803 	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2805 		dev->features |= NETIF_F_HIGHDMA;
2807 	/* Fire the PM timer that will shut us down in about 10 seconds */
2808 	gp->pm_timer.expires = jiffies + 10*HZ;
2809 	add_timer(&gp->pm_timer);
/* Error unwinding: release resources in reverse order of acquisition. */
2813 err_out_free_consistent:
2814 	pci_free_consistent(pdev,
2815 			    sizeof(struct gem_init_block),
2821 	/* Stop the PM timer & task */
2822 	del_timer_sync(&gp->pm_timer);
2823 	flush_scheduled_tasks();
2828 	iounmap((void *) gp->regs);
2831 	pci_release_regions(pdev);
2833 err_out_free_netdev:
/* PCI remove: unregister the netdev, stop PM machinery, then free the DMA
 * init block, unmap registers and release PCI resources.
 */
2840 static void __devexit gem_remove_one(struct pci_dev *pdev)
2842 	struct net_device *dev = pci_get_drvdata(pdev);
2845 		struct gem *gp = dev->priv;
2847 		unregister_netdev(dev);
2850 		/* Stop the PM timer & task */
2851 		del_timer_sync(&gp->pm_timer);
2852 		flush_scheduled_tasks();
2857 		pci_free_consistent(pdev,
2858 				    sizeof(struct gem_init_block),
2861 		iounmap((void *) gp->regs);
2862 		pci_release_regions(pdev);
2865 		pci_set_drvdata(pdev, NULL);
/* PCI driver descriptor (GNU "label:" initializer syntax, pre-C99 style).
 * The resume hook and closing brace are elided in this excerpt; suspend/
 * resume are compiled in only under CONFIG_PM.
 */
2869 static struct pci_driver gem_driver = {
2870 	name:		GEM_MODULE_NAME,
2871 	id_table:	gem_pci_tbl,
2872 	probe:		gem_init_one,
2873 	remove:		__devexit_p(gem_remove_one),
2875 	suspend:	gem_suspend,
2877 #endif /* CONFIG_PM */
/* Module entry point: register the PCI driver. */
2880 static int __init gem_init(void)
2882 	return pci_module_init(&gem_driver);
/* Module exit point: unregister the PCI driver. */
2885 static void __exit gem_cleanup(void)
2887 	pci_unregister_driver(&gem_driver);
2890 module_init(gem_init);
2891 module_exit(gem_cleanup);