1 /* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
3 Written 1997-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
13 It also supports the Symbios Logic version of the same chip core.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
20 Support and updates available at
21 http://www.scyld.com/network/yellowfin.html
24 Linux kernel changelog:
25 -----------------------
27 LK1.1.1 (jgarzik): Port to 2.4 kernel
30 * Merge in becker version 1.05
34 * Update yellowfin_timer to correctly calculate duplex.
35 (suggested by Manfred Spraul)
37 LK1.1.4 (val@nmt.edu):
38 * Fix three endian-ness bugs
39 * Support dual function SYM53C885E ethernet chip
41 LK1.1.5 (val@nmt.edu):
42 * Fix forced full-duplex bug I introduced
46 #define DRV_NAME "yellowfin"
47 #define DRV_VERSION "1.05+LK1.1.5"
48 #define DRV_RELDATE "May 10, 2001"
50 #define PFX DRV_NAME ": "
52 /* The user-configurable values.
53 These may be modified when a driver module is loaded.*/
/* Module-load-time tunables.  All are plain ints so they can be
   overridden with MODULE_PARM (see the MODULE_PARM() lines below). */
55 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
56 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
57 static int max_interrupt_work = 20;
/* dma_ctrl / fifo_cfg defaults are chosen per hardware revision.
   NOTE(review): `#elif YF_NEW` breaks if YF_NEW is defined empty;
   `#elif defined(YF_NEW)` would be more robust — confirm before changing. */
59 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
60 /* System-wide count of bogus-rx frames. */
62 static int dma_ctrl = 0x004A0263; /* Constrained by errata */
63 static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
64 #elif YF_NEW /* A future perfect board :->. */
65 static int dma_ctrl = 0x00CAC277; /* Override when loading module! */
66 static int fifo_cfg = 0x0028;
68 static int dma_ctrl = 0x004A0263; /* Constrained by errata */
69 static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
72 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
73 Setting to > 1514 effectively disables this feature. */
74 static int rx_copybreak;
76 /* Used to pass the media type, etc.
77 No media types are currently defined. These exist for driver
/* Per-card option arrays, indexed by probe order; -1 means "unset". */
80 #define MAX_UNITS 8 /* More are supported, limit only on options */
81 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
82 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
84 /* Do ugly workaround for GX server chipset errata. */
87 /* Operational parameters that are set at compile time. */
89 /* Keep the ring sizes a power of two for efficiency.
90 Making the Tx ring too long decreases the effectiveness of channel
91 bonding and packet priority.
92 There are no ill effects from too-large receive rings. */
93 #define TX_RING_SIZE 16
94 #define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */
95 #define RX_RING_SIZE 64
96 #define STATUS_TOTAL_SIZE TX_RING_SIZE*sizeof(struct tx_status_words)
97 #define TX_TOTAL_SIZE 2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
98 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct yellowfin_desc)
100 /* Operational parameters that usually are not changed. */
101 /* Time in jiffies before concluding the transmitter is hung. */
102 #define TX_TIMEOUT (2*HZ)
103 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
105 #define yellowfin_debug debug
107 #if !defined(__OPTIMIZE__)
108 #warning You must compile this file with the correct options!
109 #warning See the last lines of the source file.
110 #error You must compile this driver with "-O".
113 #include <linux/module.h>
114 #include <linux/kernel.h>
115 #include <linux/string.h>
116 #include <linux/timer.h>
117 #include <linux/errno.h>
118 #include <linux/ioport.h>
119 #include <linux/slab.h>
120 #include <linux/interrupt.h>
121 #include <linux/pci.h>
122 #include <linux/init.h>
123 #include <linux/mii.h>
124 #include <linux/netdevice.h>
125 #include <linux/etherdevice.h>
126 #include <linux/skbuff.h>
127 #include <linux/ethtool.h>
128 #include <linux/crc32.h>
129 #include <asm/uaccess.h>
130 #include <asm/processor.h> /* Processor type for cache alignment. */
131 #include <asm/unaligned.h>
132 #include <asm/bitops.h>
135 /* These identify the driver base version and may not be removed. */
136 static char version[] __devinitdata =
137 KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
138 KERN_INFO " http://www.scyld.com/network/yellowfin.html\n"
139 KERN_INFO " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
154 #endif /* !USE_IO_OPS */
155 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
156 MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
157 MODULE_LICENSE("GPL");
159 MODULE_PARM(max_interrupt_work, "i");
160 MODULE_PARM(mtu, "i");
161 MODULE_PARM(debug, "i");
162 MODULE_PARM(rx_copybreak, "i");
163 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
164 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
165 MODULE_PARM(gx_fix, "i");
166 MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
167 MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
168 MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
169 MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
170 MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
171 MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
172 MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
177 I. Board Compatibility
179 This device driver is designed for the Packet Engines "Yellowfin" Gigabit
180 Ethernet adapter. The only PCA currently supported is the G-NIC 64-bit
183 II. Board-specific settings
185 PCI bus devices are configured by the system at boot time, so no jumpers
186 need to be set on the board. The system BIOS preferably should assign the
187 PCI INTA signal to an otherwise unused system IRQ line.
188 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
191 III. Driver operation
195 The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
196 This is a descriptor list scheme similar to that used by the EEPro100 and
197 Tulip. This driver uses two statically allocated fixed-size descriptor lists
198 formed into rings by a branch from the final descriptor to the beginning of
199 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
201 The driver allocates full frame size skbuffs for the Rx ring buffers at
202 open() time and passes the skb->data field to the Yellowfin as receive data
203 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
204 a fresh skbuff is allocated and the frame is copied to the new skbuff.
205 When the incoming frame is larger, the skbuff is passed directly up the
206 protocol stack and replaced by a newly allocated skbuff.
208 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
209 using a full-sized skbuff for small frames vs. the copying costs of larger
210 frames. For small frames the copying cost is negligible (esp. considering
211 that we are pre-loading the cache with immediately useful header
212 information). For large frames the copying cost is non-trivial, and the
213 larger copy might flush the cache of useful data.
215 IIIC. Synchronization
217 The driver runs as two independent, single-threaded flows of control. One
218 is the send-packet routine, which enforces single-threaded use by the
219 dev->tbusy flag. The other thread is the interrupt handler, which is single
220 threaded by the hardware and other software.
222 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
223 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
224 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
225 the 'yp->tx_full' flag.
227 The interrupt handler has exclusive control over the Rx ring and records stats
228 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
229 empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
230 clears both the tx_full and tbusy flags.
234 Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
235 Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
236 and an AlphaStation to verify the Alpha port!
240 Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
241 Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
243 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
244 http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
248 See Packet Engines confidential appendix (prototype chips only).
/* Probe-time bookkeeping: how each supported board's PCI command
   register and BARs must be set up, and which chip quirks it has. */
253 enum pci_id_flags_bits {
254 /* Set PCI command register bits before calling probe1(). */
255 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
256 /* Read and map the single following PCI BAR. */
257 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
258 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
259 PCI_UNUSED_IRQ=0x800,
/* Capability/quirk bits stored in yellowfin_private.drv_flags and
   tested throughout the driver (e.g. HasMII in yellowfin_init_one). */
261 enum capability_flags {
262 HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
263 HasMACAddrBug=32, DontUseEeprom=64, /* Only on early revs. */
265 /* The PCI I/O space extent. */
266 #define YELLOWFIN_SIZE 0x100
/* Two alternative PCI_IOTYPE definitions: port I/O via BAR0 or
   memory-mapped I/O via BAR1; selected by a conditional not visible here. */
268 #define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
270 #define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
/* Fields of the pci_id_info match structure: vendor/device id pair,
   subsystem id, revision (8 bits), plus per-board flags and sizes. */
276 int pci, pci_mask, subsystem, subsystem_mask;
277 int revision, revision_mask; /* Only 8 bits. */
279 enum pci_id_flags_bits pci_flags;
280 int io_size; /* Needed for I/O region check or ioremap(). */
281 int drv_flags; /* Driver use, intended as capability flags. */
/* Boards recognized by this driver; the index into this table is the
   driver_data value in yellowfin_pci_tbl below. */
284 static struct pci_id_info pci_id_tbl[] = {
285 {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
286 PCI_IOTYPE, YELLOWFIN_SIZE,
287 FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug},
288 {"Symbios SYM83C885", { 0x07011000, 0xffffffff},
289 PCI_IOTYPE, YELLOWFIN_SIZE, HasMII | DontUseEeprom },
/* PCI hotplug/module device table: vendor 0x1000 (Symbios/NCR),
   devices 0x0702 (G-NIC) and 0x0701 (SYM53C885 Ethernet function). */
293 static struct pci_device_id yellowfin_pci_tbl[] __devinitdata = {
294 { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
295 { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
298 MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
301 /* Offsets to the Yellowfin registers. Various sizes and alignments. */
302 enum yellowfin_offsets {
303 TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
304 TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
305 RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
306 RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
307 EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
308 ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
309 Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
310 MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
312 RxDepth=0xB8, FlowCtrl=0xBC,
313 AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
314 EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
/* DBDMA descriptor layouts.  Field definitions (dbdma_cmd, addr,
   branch_addr, result_status, request_cnt, tx_errs, ...) are on lines
   elided from this extraction; only the struct headers and the
   big/little-endian conditional markers are visible here. */
318 /* The Yellowfin Rx and Tx buffer descriptors.
319 Elements are written as 32 bit for endian portability. */
320 struct yellowfin_desc {
327 struct tx_status_words {
333 #else /* Little endian chips. */
338 #endif /* __BIG_ENDIAN */
/* DBDMA command opcodes (top nibble) and branch/interrupt/wait
   condition fields, OR-ed with the byte count in the low 16 bits. */
341 /* Bits in yellowfin_desc.cmd */
343 CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
344 CMD_NOP=0x60000000, CMD_STOP=0x70000000,
345 BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
346 BRANCH_IFTRUE=0x040000,
349 /* Bits in yellowfin_desc.status */
350 enum desc_status_bits { RX_EOP=0x0040, };
352 /* Bits in the interrupt status/mask registers. */
353 enum intr_status_bits {
354 IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
355 IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
356 IntrEarlyRx=0x100, IntrWakeup=0x200, };
358 #define PRIV_ALIGN 31 /* Required alignment mask */
/* Per-device driver state, hung off dev->priv.  Ring memory itself is
   allocated separately via pci_alloc_consistent in yellowfin_init_one. */
360 struct yellowfin_private {
361 /* Descriptor rings first for alignment.
362 Tx requires a second descriptor for status. */
363 struct yellowfin_desc *rx_ring;
364 struct yellowfin_desc *tx_ring;
365 struct sk_buff* rx_skbuff[RX_RING_SIZE];
366 struct sk_buff* tx_skbuff[TX_RING_SIZE];
/* Bus addresses of the rings, handed to the chip via RxPtr/TxPtr. */
367 dma_addr_t rx_ring_dma;
368 dma_addr_t tx_ring_dma;
370 struct tx_status_words *tx_status;
371 dma_addr_t tx_status_dma;
373 struct timer_list timer; /* Media selection timer. */
374 struct net_device_stats stats;
375 /* Frequently used and paired value: keep adjacent for cache effect. */
376 int chip_id, drv_flags;
377 struct pci_dev *pci_dev;
378 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
379 unsigned int rx_buf_sz; /* Based on MTU+slack. */
/* Last status slot the interrupt handler reaped; see yellowfin_interrupt. */
380 struct tx_status_words *tx_tail_desc;
381 unsigned int cur_tx, dirty_tx;
383 unsigned int tx_full:1; /* The Tx queue is full. */
384 unsigned int full_duplex:1; /* Full-duplex operation requested. */
385 unsigned int duplex_lock:1;
386 unsigned int medialock:1; /* Do not sense media. */
387 unsigned int default_port:4; /* Last dev->if_port value. */
388 /* MII transceiver section. */
389 int mii_cnt; /* MII device addresses. */
390 u16 advertising; /* NWay media advertisement */
391 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used */
/* Forward declarations for the driver entry points and helpers below. */
395 static int read_eeprom(long ioaddr, int location);
396 static int mdio_read(long ioaddr, int phy_id, int location);
397 static void mdio_write(long ioaddr, int phy_id, int location, int value);
398 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
399 static int yellowfin_open(struct net_device *dev);
400 static void yellowfin_timer(unsigned long data);
401 static void yellowfin_tx_timeout(struct net_device *dev);
402 static void yellowfin_init_ring(struct net_device *dev);
403 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
404 static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
405 static int yellowfin_rx(struct net_device *dev);
406 static void yellowfin_error(struct net_device *dev, int intr_status);
407 static int yellowfin_close(struct net_device *dev);
408 static struct net_device_stats *yellowfin_get_stats(struct net_device *dev);
409 static void set_rx_mode(struct net_device *dev);
/* PCI probe routine: enable the device, map its registers, read the
   station address, allocate the DMA rings, fill in the net_device
   methods, register the interface, and scan for MII PHYs.
   Error paths unwind in reverse order via the err_out_* labels. */
412 static int __devinit yellowfin_init_one(struct pci_dev *pdev,
413 const struct pci_device_id *ent)
415 struct net_device *dev;
416 struct yellowfin_private *np;
418 int chip_idx = ent->driver_data;
420 long ioaddr, real_ioaddr;
421 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
422 int drv_flags = pci_id_tbl[chip_idx].drv_flags;
426 /* when built into the kernel, we only print version if device is found */
428 static int printed_version;
429 if (!printed_version++)
433 i = pci_enable_device(pdev);
436 dev = alloc_etherdev(sizeof(*np));
438 printk (KERN_ERR PFX "cannot allocate ethernet device\n");
441 SET_MODULE_OWNER(dev);
445 if (pci_request_regions(pdev, dev->name))
446 goto err_out_free_netdev;
448 pci_set_master (pdev);
/* BAR0 for port I/O builds, BAR1 + ioremap for MMIO builds. */
451 real_ioaddr = ioaddr = pci_resource_start (pdev, 0);
453 real_ioaddr = ioaddr = pci_resource_start (pdev, 1);
454 ioaddr = (long) ioremap(ioaddr, YELLOWFIN_SIZE);
456 goto err_out_free_res;
/* Station address: either read back from the chip's StnAddr registers
   (DontUseEeprom boards) or from the serial EEPROM, with a 0x100
   offset heuristic for dual-function parts. */
460 if (drv_flags & DontUseEeprom)
461 for (i = 0; i < 6; i++)
462 dev->dev_addr[i] = inb(ioaddr + StnAddr + i);
464 int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
465 for (i = 0; i < 6; i++)
466 dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
469 /* Reset the chip. */
470 outl(0x80000000, ioaddr + DMACtrl);
472 dev->base_addr = ioaddr;
475 pci_set_drvdata(pdev, dev);
476 spin_lock_init(&np->lock);
479 np->chip_id = chip_idx;
480 np->drv_flags = drv_flags;
/* Coherent DMA allocations for the Tx ring, Rx ring and Tx status
   block; each failure unwinds the earlier allocations. */
482 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
484 goto err_out_cleardev;
485 np->tx_ring = (struct yellowfin_desc *)ring_space;
486 np->tx_ring_dma = ring_dma;
488 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
490 goto err_out_unmap_tx;
491 np->rx_ring = (struct yellowfin_desc *)ring_space;
492 np->rx_ring_dma = ring_dma;
494 ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
496 goto err_out_unmap_rx;
497 np->tx_status = (struct tx_status_words *)ring_space;
498 np->tx_status_dma = ring_dma;
/* Media options: module parameter or kernel command line (mem_start). */
501 option = dev->mem_start;
503 /* The lower four bits are the media type. */
507 np->default_port = option & 15;
508 if (np->default_port)
511 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
517 /* The Yellowfin-specific entries in the device structure. */
518 dev->open = &yellowfin_open;
519 dev->hard_start_xmit = &yellowfin_start_xmit;
520 dev->stop = &yellowfin_close;
521 dev->get_stats = &yellowfin_get_stats;
522 dev->set_multicast_list = &set_rx_mode;
523 dev->do_ioctl = &netdev_ioctl;
524 dev->tx_timeout = yellowfin_tx_timeout;
525 dev->watchdog_timeo = TX_TIMEOUT;
530 i = register_netdev(dev);
532 goto err_out_unmap_status;
534 printk(KERN_INFO "%s: %s type %8x at 0x%lx, ",
535 dev->name, pci_id_tbl[chip_idx].name, inl(ioaddr + ChipRev), ioaddr);
536 for (i = 0; i < 5; i++)
537 printk("%2.2x:", dev->dev_addr[i]);
538 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
/* Probe MII addresses 0..31; anything that answers with a status other
   than all-ones/all-zeros is a live PHY. */
540 if (np->drv_flags & HasMII) {
541 int phy, phy_idx = 0;
542 for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
543 int mii_status = mdio_read(ioaddr, phy, 1);
544 if (mii_status != 0xffff && mii_status != 0x0000) {
545 np->phys[phy_idx++] = phy;
546 np->advertising = mdio_read(ioaddr, phy, 4);
547 printk(KERN_INFO "%s: MII PHY found at address %d, status "
548 "0x%4.4x advertising %4.4x.\n",
549 dev->name, phy, mii_status, np->advertising);
552 np->mii_cnt = phy_idx;
/* Error unwinding: free in reverse order of allocation. */
559 err_out_unmap_status:
560 pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
563 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
565 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
567 pci_set_drvdata(pdev, NULL);
569 iounmap((void *)ioaddr);
572 pci_release_regions(pdev);
/* Read one byte from the serial EEPROM at 'location'.  Writes the
   address, issues a read command via EECtrl, then busy-waits on the
   EEStatus busy bit (bounded by bogus_cnt) before reading the data. */
578 static int __devinit read_eeprom(long ioaddr, int location)
580 int bogus_cnt = 10000; /* Typical 33Mhz: 1050 ticks */
582 outb(location, ioaddr + EEAddr);
583 outb(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
584 while ((inb(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
586 return inb(ioaddr + EERead);
589 /* MII Management Data I/O accesses.
590 These routines assume the MDIO controller is idle, and do not exit until
591 the command is finished. */
/* Read one 16-bit MII register: load phy/register into MII_Addr,
   trigger a read via MII_Cmd, poll the busy bit (bounded loop),
   then return the data word from MII_Rd_Data. */
593 static int mdio_read(long ioaddr, int phy_id, int location)
597 outw((phy_id<<8) + location, ioaddr + MII_Addr);
598 outw(1, ioaddr + MII_Cmd);
599 for (i = 10000; i >= 0; i--)
600 if ((inw(ioaddr + MII_Status) & 1) == 0)
602 return inw(ioaddr + MII_Rd_Data);
/* Write one 16-bit MII register, then poll the busy bit until the
   controller reports the command finished (bounded loop, no error
   reporting on timeout). */
605 static void mdio_write(long ioaddr, int phy_id, int location, int value)
609 outw((phy_id<<8) + location, ioaddr + MII_Addr);
610 outw(value, ioaddr + MII_Wr_Data);
612 /* Wait for the command to finish. */
613 for (i = 10000; i >= 0; i--)
614 if ((inw(ioaddr + MII_Status) & 1) == 0)
/* dev->open: reset the chip, grab the (shared) IRQ, build the DMA
   rings, program the station address and condition-select registers,
   start the Rx/Tx channels, and arm the media-watch timer. */
620 static int yellowfin_open(struct net_device *dev)
622 struct yellowfin_private *yp = dev->priv;
623 long ioaddr = dev->base_addr;
626 /* Reset the chip. */
627 outl(0x80000000, ioaddr + DMACtrl);
629 i = request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name, dev);
632 if (yellowfin_debug > 1)
633 printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
634 dev->name, dev->irq);
636 yellowfin_init_ring(dev);
/* Hand the ring base bus addresses to the DBDMA engines. */
638 outl(yp->rx_ring_dma, ioaddr + RxPtr);
639 outl(yp->tx_ring_dma, ioaddr + TxPtr);
641 for (i = 0; i < 6; i++)
642 outb(dev->dev_addr[i], ioaddr + StnAddr + i);
644 /* Set up various condition 'select' registers.
645 There are no options here. */
646 outl(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */
647 outl(0x00800080, ioaddr + TxBranchSel); /* Branch on Tx abort */
648 outl(0x00400040, ioaddr + TxWaitSel); /* Wait on Tx status */
649 outl(0x00400040, ioaddr + RxIntrSel); /* Interrupt on Rx done */
650 outl(0x00400040, ioaddr + RxBranchSel); /* Branch on Rx error */
651 outl(0x00400040, ioaddr + RxWaitSel); /* Wait on Rx done */
653 /* Initialize other registers: with so many settings, this will
654 eventually be converted to an offset/value list. */
655 outl(dma_ctrl, ioaddr + DMACtrl);
656 outw(fifo_cfg, ioaddr + FIFOcfg);
657 /* Enable automatic generation of flow control frames, period 0xffff. */
658 outl(0x0030FFFF, ioaddr + FlowCtrl);
660 yp->tx_threshold = 32;
661 outl(yp->tx_threshold, ioaddr + TxThreshold);
663 if (dev->if_port == 0)
664 dev->if_port = yp->default_port;
666 netif_start_queue(dev);
668 /* Setting the Rx mode will start the Rx process. */
669 if (yp->drv_flags & IsGigabit) {
670 /* We are always in full-duplex mode with gigabit! */
672 outw(0x01CF, ioaddr + Cnfg);
/* Non-gigabit (SYM53C885) path: program inter-frame gaps and duplex. */
674 outw(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
675 outw(0x1018, ioaddr + FrameGap1);
676 outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
680 /* Enable interrupts by setting the interrupt mask. */
681 outw(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */
682 outw(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
683 outl(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */
684 outl(0x80008000, ioaddr + TxCtrl);
686 if (yellowfin_debug > 2) {
687 printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
691 /* Set the timer to check for link beat. */
692 init_timer(&yp->timer);
693 yp->timer.expires = jiffies + 3*HZ;
694 yp->timer.data = (unsigned long)dev;
695 yp->timer.function = &yellowfin_timer; /* timer handler */
696 add_timer(&yp->timer);
/* Periodic media-watch timer: re-read the PHY's link/partner state,
   recompute duplex from the negotiated capabilities, update the Cnfg
   register, then re-arm itself (next_tick defaults to 60s). */
701 static void yellowfin_timer(unsigned long data)
703 struct net_device *dev = (struct net_device *)data;
704 struct yellowfin_private *yp = dev->priv;
705 long ioaddr = dev->base_addr;
706 int next_tick = 60*HZ;
708 if (yellowfin_debug > 3) {
709 printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
710 dev->name, inw(ioaddr + IntrStatus));
/* Duplex = our advertisement ANDed with the link partner's ability. */
714 int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
715 int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
716 int negotiated = lpa & yp->advertising;
717 if (yellowfin_debug > 1)
718 printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
719 "link partner capability %4.4x.\n",
720 dev->name, yp->phys[0], bmsr, lpa);
722 yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
724 outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
726 if (bmsr & BMSR_LSTATUS)
732 yp->timer.expires = jiffies + next_tick;
733 add_timer(&yp->timer);
/* dev->tx_timeout watchdog: log the chip and ring state, kick the Tx
   DBDMA channel awake, and restart the queue if there is room.  The
   chip is not reinitialized here (see the comment below). */
736 static void yellowfin_tx_timeout(struct net_device *dev)
738 struct yellowfin_private *yp = dev->priv;
739 long ioaddr = dev->base_addr;
741 printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
742 "status %4.4x, Rx status %4.4x, resetting...\n",
743 dev->name, yp->cur_tx, yp->dirty_tx,
744 inl(ioaddr + TxStatus), inl(ioaddr + RxStatus));
746 /* Note: these should be KERN_DEBUG. */
747 if (yellowfin_debug) {
749 printk(KERN_WARNING " Rx ring %p: ", yp->rx_ring);
750 for (i = 0; i < RX_RING_SIZE; i++)
751 printk(" %8.8x", yp->rx_ring[i].result_status);
752 printk("\n"KERN_WARNING" Tx ring %p: ", yp->tx_ring);
753 for (i = 0; i < TX_RING_SIZE; i++)
754 printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs,
755 yp->tx_ring[i].result_status);
759 /* If the hardware is found to hang regularly, we will update the code
760 to reinitialize the chip here. */
763 /* Wake the potentially-idle transmit channel. */
764 outl(0x10001000, dev->base_addr + TxCtrl);
765 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
766 netif_wake_queue (dev); /* Typical path */
768 dev->trans_start = jiffies;
769 yp->stats.tx_errors++;
772 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void /* build descriptor rings for open(); two Tx layouts exist:
   a single-descriptor ring (full-status chips) and a paired
   data+status descriptor ring (the #else branch below). */
773 static void yellowfin_init_ring(struct net_device *dev)
775 struct yellowfin_private *yp = dev->priv;
779 yp->cur_rx = yp->cur_tx = 0;
782 yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
/* First pass: link every Rx descriptor to its successor (circular). */
784 for (i = 0; i < RX_RING_SIZE; i++) {
785 yp->rx_ring[i].dbdma_cmd =
786 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
787 yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
788 ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
/* Second pass: attach a freshly allocated skb to each Rx slot and
   DMA-map its data area for the chip to fill. */
791 for (i = 0; i < RX_RING_SIZE; i++) {
792 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
793 yp->rx_skbuff[i] = skb;
796 skb->dev = dev; /* Mark as being used by this device. */
797 skb_reserve(skb, 2); /* 16 byte align the IP header. */
798 yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
799 skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
801 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
802 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
806 /* In this mode the Tx ring needs only a single descriptor. */
807 for (i = 0; i < TX_RING_SIZE; i++) {
808 yp->tx_skbuff[i] = 0;
809 yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
810 yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
811 ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
/* Wrap the ring: last descriptor branches back unconditionally. */
814 yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
819 /* Tx ring needs a pair of descriptors, the second for the status. */
820 for (i = 0; i < TX_RING_SIZE; i++) {
822 yp->tx_skbuff[i] = 0;
823 /* Branch on Tx error. */
824 yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
825 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
826 (j+1)*sizeof(struct yellowfin_desc);
/* Status descriptor: full-status chips write a whole tx_status_words
   record; Symbios parts write only the 2-byte tx_errs field. */
828 if (yp->flags & FullTxStatus) {
829 yp->tx_ring[j].dbdma_cmd =
830 cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
831 yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
832 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
833 i*sizeof(struct tx_status_words);
835 /* Symbios chips write only tx_errs word. */
836 yp->tx_ring[j].dbdma_cmd =
837 cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
838 yp->tx_ring[j].request_cnt = 2;
839 /* Om pade ummmmm... */
840 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
841 i*sizeof(struct tx_status_words) +
842 &(yp->tx_status[0].tx_errs) -
843 &(yp->tx_status[0]));
845 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
846 ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
/* Wrap the 2*TX_RING_SIZE descriptor chain back to the start. */
849 yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
852 yp->tx_tail_desc = &yp->tx_status[0];
/* dev->hard_start_xmit: queue one skb on the Tx DBDMA ring.  The
   descriptor's command word (with the "ownership" semantics) is
   written last so the chip never sees a half-built descriptor. */
856 static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
858 struct yellowfin_private *yp = dev->priv;
861 netif_stop_queue (dev);
863 /* Note: Ordering is important here, set the field with the
864 "ownership" bit last, and only then increment cur_tx. */
866 /* Calculate the next Tx descriptor entry. */
867 entry = yp->cur_tx % TX_RING_SIZE;
869 yp->tx_skbuff[entry] = skb;
/* GX chipset errata workaround: pad the frame so it does not end in
   the last 8 bytes of a 32-byte cache line. */
871 if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */
872 int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
873 /* Fix GX chipset errata. */
874 if (cacheline_end > 24 || cacheline_end == 0)
875 skb->len += 32 - cacheline_end + 1;
/* Single-descriptor (full Tx status) layout. */
878 yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
879 skb->data, skb->len, PCI_DMA_TODEVICE));
880 yp->tx_ring[entry].result_status = 0;
881 if (entry >= TX_RING_SIZE-1) {
882 /* New stop command. */
883 yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
884 yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
885 cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | skb->len);
887 yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
888 yp->tx_ring[entry].dbdma_cmd =
889 cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | skb->len);
/* Paired-descriptor layout: data descriptor at entry*2, status
   descriptor at entry*2+1 (hence the entry<<1 indexing). */
893 yp->tx_ring[entry<<1].request_cnt = skb->len;
894 yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
895 skb->data, skb->len, PCI_DMA_TODEVICE));
896 /* The input_last (status-write) command is constant, but we must
897 rewrite the subsequent 'stop' command. */
901 unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
902 yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
904 /* Final step -- overwrite the old 'stop' command. */
/* Interrupt only on every 6th frame to cut interrupt load. */
906 yp->tx_ring[entry<<1].dbdma_cmd =
907 cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
908 CMD_TX_PKT | BRANCH_IFTRUE) | skb->len);
911 /* Non-x86 Todo: explicitly flush cache lines here. */
913 /* Wake the potentially-idle transmit channel. */
914 outl(0x10001000, dev->base_addr + TxCtrl);
916 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
917 netif_start_queue (dev); /* Typical path */
920 dev->trans_start = jiffies;
922 if (yellowfin_debug > 4) {
923 printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
924 dev->name, yp->cur_tx, entry);
929 /* The interrupt handler does all of the Rx thread work and cleans up
930 after the Tx thread. */
931 static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
933 struct net_device *dev = dev_instance;
934 struct yellowfin_private *yp;
936 int boguscnt = max_interrupt_work;
938 #ifndef final_version /* Can never occur. */
940 printk (KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
945 ioaddr = dev->base_addr;
948 spin_lock (&yp->lock);
/* Reading IntrClear both fetches and acknowledges pending events;
   loop until no events remain or boguscnt work-limit expires. */
951 u16 intr_status = inw(ioaddr + IntrClear);
953 if (yellowfin_debug > 4)
954 printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
955 dev->name, intr_status);
957 if (intr_status == 0)
960 if (intr_status & (IntrRxDone | IntrEarlyRx)) {
962 outl(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */
/* Tx reap, single-descriptor layout: descriptors whose
   result_status was written back by the chip are done. */
966 for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
967 int entry = yp->dirty_tx % TX_RING_SIZE;
970 if (yp->tx_ring[entry].result_status == 0)
972 skb = yp->tx_skbuff[entry];
973 yp->stats.tx_packets++;
974 yp->stats.tx_bytes += skb->len;
975 /* Free the original skb. */
976 pci_unmap_single(yp->pci_dev, yp->tx_ring[entry].addr,
977 skb->len, PCI_DMA_TODEVICE);
978 dev_kfree_skb_irq(skb);
979 yp->tx_skbuff[entry] = 0;
982 && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
983 /* The ring is no longer full, clear tbusy. */
985 netif_wake_queue(dev);
/* Tx reap, paired-descriptor layout: walk the status words the chip
   wrote, accounting errors/collisions per tx_errs bit. */
988 if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
989 unsigned dirty_tx = yp->dirty_tx;
991 for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
993 /* Todo: optimize this. */
994 int entry = dirty_tx % TX_RING_SIZE;
995 u16 tx_errs = yp->tx_status[entry].tx_errs;
998 #ifndef final_version
999 if (yellowfin_debug > 5)
1000 printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
1001 "%4.4x %4.4x %4.4x %4.4x.\n",
1003 yp->tx_status[entry].tx_cnt,
1004 yp->tx_status[entry].tx_errs,
1005 yp->tx_status[entry].total_tx_cnt,
1006 yp->tx_status[entry].paused);
1009 break; /* It still hasn't been Txed */
1010 skb = yp->tx_skbuff[entry];
1011 if (tx_errs & 0xF810) {
1012 /* There was a major error, log it. */
1013 #ifndef final_version
1014 if (yellowfin_debug > 1)
1015 printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
1016 dev->name, tx_errs);
1018 yp->stats.tx_errors++;
1019 if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
1020 if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
1021 if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
1022 if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
1024 #ifndef final_version
1025 if (yellowfin_debug > 4)
1026 printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
1027 dev->name, tx_errs);
1029 yp->stats.tx_bytes += skb->len;
1030 yp->stats.collisions += tx_errs & 15;
1031 yp->stats.tx_packets++;
1033 /* Free the original skb. */
1034 pci_unmap_single(yp->pci_dev,
1035 yp->tx_ring[entry<<1].addr, skb->len,
1037 dev_kfree_skb_irq(skb);
1038 yp->tx_skbuff[entry] = 0;
1039 /* Mark status as empty. */
1040 yp->tx_status[entry].tx_errs = 0;
1043 #ifndef final_version
1044 if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
1045 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1046 dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
1047 dirty_tx += TX_RING_SIZE;
1052 && yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
1053 /* The ring is no longer full, clear tbusy. */
1055 netif_wake_queue(dev);
1058 yp->dirty_tx = dirty_tx;
1059 yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
1063 /* Log errors and other uncommon events. */
1064 if (intr_status & 0x2ee) /* Abnormal error summary. */
1065 yellowfin_error(dev, intr_status);
/* Work limit hit: leave remaining events for the next interrupt. */
1067 if (--boguscnt < 0) {
1068 printk(KERN_WARNING "%s: Too much work at interrupt, "
1069 "status=0x%4.4x.\n",
1070 dev->name, intr_status);
1075 if (yellowfin_debug > 3)
1076 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1077 dev->name, inw(ioaddr + IntrStatus));
1079 spin_unlock (&yp->lock);
1083 /* This routine is logically part of the interrupt handler, but separated
1084 for clarity and better register allocation. */
1085 static int yellowfin_rx(struct net_device *dev)
/*
 * Rx side of the interrupt service path: walk the Rx descriptor ring,
 * deliver completed frames to the network stack, account error frames
 * in yp->stats, then refill the ring with fresh skbuffs.
 *
 * NOTE(review): this extract is missing interior lines (the receive
 * loop header around original line 1100 and several local declarations
 * among them), so the comments below describe only what is visible.
 */
1087 struct yellowfin_private *yp = dev->priv;
1088 int entry = yp->cur_rx % RX_RING_SIZE;
/* Upper bound on iterations: number of not-yet-reaped ring slots. */
1089 int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
1091 if (yellowfin_debug > 4) {
1092 printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
1093 entry, yp->rx_ring[entry].result_status);
1094 printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n",
1095 entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
1096 yp->rx_ring[entry].result_status);
1099 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1101 struct yellowfin_desc *desc = &yp->rx_ring[entry];
1102 struct sk_buff *rx_skb = yp->rx_skbuff[entry];
/* A zero result_status means the chip has not completed this slot yet. */
1108 if(!desc->result_status)
/* Make the DMA'd buffer visible to the CPU before inspecting it. */
1110 pci_dma_sync_single(yp->pci_dev, desc->addr,
1111 yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1112 desc_status = le32_to_cpu(desc->result_status) >> 16;
1113 buf_addr = rx_skb->tail;
/* Received byte count: requested length minus residual count. */
1114 data_size = (le32_to_cpu(desc->dbdma_cmd) -
1115 le32_to_cpu(desc->result_status)) & 0xffff;
/* The chip appends a status word in the final two bytes of the buffer. */
1116 frame_status = le16_to_cpu(get_unaligned((s16*)&(buf_addr[data_size - 2])));
1117 if (yellowfin_debug > 4)
1118 printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
/* No EOP: the frame spilled past one buffer -- count as length error. */
1122 if ( ! (desc_status & RX_EOP)) {
1123 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
1124 " status %4.4x!\n", dev->name, desc_status);
1125 yp->stats.rx_length_errors++;
/* Gigabit parts: decode error bits from the trailer frame_status word. */
1126 } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
1127 /* There was a error. */
1128 if (yellowfin_debug > 3)
1129 printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n",
1131 yp->stats.rx_errors++;
1132 if (frame_status & 0x0060) yp->stats.rx_length_errors++;
1133 if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
1134 if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
1135 if (frame_status < 0) yp->stats.rx_dropped++;
/* Non-gigabit parts: the last two buffer bytes are two status octets. */
1136 } else if ( !(yp->drv_flags & IsGigabit) &&
1137 ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
1138 u8 status1 = buf_addr[data_size-2];
1139 u8 status2 = buf_addr[data_size-1];
1140 yp->stats.rx_errors++;
1141 if (status1 & 0xC0) yp->stats.rx_length_errors++;
1142 if (status2 & 0x03) yp->stats.rx_frame_errors++;
1143 if (status2 & 0x04) yp->stats.rx_crc_errors++;
1144 if (status2 & 0x80) yp->stats.rx_dropped++;
1145 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
/* Errata path: buggy MAC filtering may pass frames not addressed to us;
   count them in bogus_rx and warn on the first occurrence. */
1146 } else if ((yp->flags & HasMACAddrBug) &&
1147 memcmp(le32_to_cpu(yp->rx_ring_dma +
1148 entry*sizeof(struct yellowfin_desc)),
1149 dev->dev_addr, 6) != 0 &&
1150 memcmp(le32_to_cpu(yp->rx_ring_dma +
1151 entry*sizeof(struct yellowfin_desc)),
1152 "\377\377\377\377\377\377", 6) != 0) {
1153 if (bogus_rx++ == 0)
1154 printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:"
1156 dev->name, buf_addr[0], buf_addr[1], buf_addr[2],
1157 buf_addr[3], buf_addr[4], buf_addr[5]);
/* Good frame: compute payload length (strip trailer/CRC) and pass it up. */
1160 struct sk_buff *skb;
1161 int pkt_len = data_size -
1162 (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
1163 /* To verify: Yellowfin Length should omit the CRC! */
1165 #ifndef final_version
1166 if (yellowfin_debug > 4)
1167 printk(KERN_DEBUG " yellowfin_rx() normal Rx pkt length %d"
1168 " of %d, bogus_cnt %d.\n",
1169 pkt_len, data_size, boguscnt);
1171 /* Check if the packet is long enough to just pass up the skbuff
1172 without copying to a properly sized skbuff. */
1173 if (pkt_len > rx_copybreak) {
/* Large packet: hand the ring skb up directly and unmap its DMA buffer. */
1174 skb_put(skb = rx_skb, pkt_len);
1175 pci_unmap_single(yp->pci_dev,
1176 yp->rx_ring[entry].addr,
1178 PCI_DMA_FROMDEVICE);
1179 yp->rx_skbuff[entry] = NULL;
/* Small packet: copy into a fresh skb so the ring skb can be reused. */
1181 skb = dev_alloc_skb(pkt_len + 2);
1185 skb_reserve(skb, 2); /* 16 byte align the IP header */
1187 eth_copy_and_sum(skb, rx_skb->tail, pkt_len, 0);
1188 skb_put(skb, pkt_len);
1190 memcpy(skb_put(skb, pkt_len),
1191 rx_skb->tail, pkt_len);
1194 skb->protocol = eth_type_trans(skb, dev);
1196 dev->last_rx = jiffies;
1197 yp->stats.rx_packets++;
1198 yp->stats.rx_bytes += pkt_len;
1200 entry = (++yp->cur_rx) % RX_RING_SIZE;
1203 /* Refill the Rx ring buffers. */
1204 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
1205 entry = yp->dirty_rx % RX_RING_SIZE;
1206 if (yp->rx_skbuff[entry] == NULL) {
1207 struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
/* Allocation failure is tolerated; the slot is retried on the next pass. */
1209 break; /* Better luck next round. */
1210 yp->rx_skbuff[entry] = skb;
1211 skb->dev = dev; /* Mark as being used by this device. */
1212 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1213 yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1214 skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
/* Park the slot with CMD_STOP; the previous slot's command is rewritten
   below so the chip never races into a half-initialized descriptor. */
1216 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1217 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
1219 yp->rx_ring[entry - 1].dbdma_cmd =
1220 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
/* The last descriptor branches back to the head, closing the ring. */
1222 yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
1223 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
1230 static void yellowfin_error(struct net_device *dev, int intr_status)
/*
 * Handle "abnormal" interrupt causes: log the raw status word and bump
 * the Tx/Rx error counters for PCI bus errors/faults.  No recovery is
 * attempted (see the original author's comment below).
 */
1232 struct yellowfin_private *yp = dev->priv;
1234 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1235 dev->name, intr_status);
1236 /* Hmmmmm, it's not clear what to do here. */
1237 if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
1238 yp->stats.tx_errors++;
1239 if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
1240 yp->stats.rx_errors++;
1243 static int yellowfin_close(struct net_device *dev)
/*
 * Device close (ifconfig down): stop the queue, mask interrupts, halt
 * the chip's Tx/Rx engines, cancel the media timer, optionally dump
 * ring state for debugging, release the IRQ and free all ring skbuffs.
 *
 * NOTE(review): this extract has lines dropped (several closing braces
 * and declarations such as 'int i'); comments describe visible code only.
 */
1245 long ioaddr = dev->base_addr;
1246 struct yellowfin_private *yp = dev->priv;
1249 netif_stop_queue (dev);
1251 if (yellowfin_debug > 1) {
1252 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
1253 "Rx %4.4x Int %2.2x.\n",
1254 dev->name, inw(ioaddr + TxStatus),
1255 inw(ioaddr + RxStatus), inw(ioaddr + IntrStatus));
1256 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1257 dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
1260 /* Disable interrupts by clearing the interrupt mask. */
1261 outw(0x0000, ioaddr + IntrEnb);
1263 /* Stop the chip's Tx and Rx processes. */
1264 outl(0x80000000, ioaddr + RxCtrl);
1265 outl(0x80000000, ioaddr + TxCtrl);
/* The media/duplex watchdog must not fire once the chip is stopped. */
1267 del_timer(&yp->timer);
1269 #if defined(__i386__)
/* i386-only debug dump of both descriptor rings and the Tx status block;
   '>' marks the descriptor the chip's Tx/Rx pointer currently references. */
1270 if (yellowfin_debug > 2) {
1271 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n", yp->tx_ring_dma);
1272 for (i = 0; i < TX_RING_SIZE*2; i++)
1273 printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
1274 inl(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
1275 i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
1276 yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
1277 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
1278 for (i = 0; i < TX_RING_SIZE; i++)
1279 printk(" #%d status %4.4x %4.4x %4.4x %4.4x.\n",
1280 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
1281 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
1283 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n", yp->rx_ring_dma);
1284 for (i = 0; i < RX_RING_SIZE; i++) {
1285 printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
1286 inl(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
1287 i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
1288 yp->rx_ring[i].result_status);
1289 if (yellowfin_debug > 6) {
1290 if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
1292 for (j = 0; j < 0x50; j++)
1294 get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
1300 #endif /* __i386__ debugging only */
1302 free_irq(dev->irq, dev);
1304 /* Free all the skbuffs in the Rx queue. */
1305 for (i = 0; i < RX_RING_SIZE; i++) {
1306 yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
1307 yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
1308 if (yp->rx_skbuff[i]) {
1309 dev_kfree_skb(yp->rx_skbuff[i]);
1311 yp->rx_skbuff[i] = 0;
/* Release any Tx skbuffs still awaiting completion. */
1313 for (i = 0; i < TX_RING_SIZE; i++) {
1314 if (yp->tx_skbuff[i])
1315 dev_kfree_skb(yp->tx_skbuff[i]);
1316 yp->tx_skbuff[i] = 0;
1319 #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
/* Report how many misdirected frames the prototype errata path saw. */
1320 if (yellowfin_debug > 0) {
1321 printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
1322 dev->name, bogus_rx);
1329 static struct net_device_stats *yellowfin_get_stats(struct net_device *dev)
/*
 * Return the device statistics block.  NOTE(review): the body is
 * truncated in this extract -- presumably it returns &yp->stats;
 * confirm against the full source.
 */
1331 struct yellowfin_private *yp = dev->priv;
1335 /* Set or clear the multicast filter for this adaptor. */
1337 static void set_rx_mode(struct net_device *dev)
/*
 * Program the chip's address-filter mode from dev->flags/mc_list:
 * promiscuous (0x000F), all-multicast (0x000B), 64-bit multicast hash
 * (0x0003 plus HashTbl), or plain unicast/broadcast (0x0001).  Rx is
 * paused via the Cnfg register while the mode is changed.
 */
1339 struct yellowfin_private *yp = dev->priv;
1340 long ioaddr = dev->base_addr;
1341 u16 cfg_value = inw(ioaddr + Cnfg);
1343 /* Stop the Rx process to change any value. */
1344 outw(cfg_value & ~0x1000, ioaddr + Cnfg);
1345 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1346 /* Unconditionally log net taps. */
1347 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1348 outw(0x000F, ioaddr + AddrMode);
1349 } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
1350 /* Too many to filter well, or accept all multicasts. */
1351 outw(0x000B, ioaddr + AddrMode);
1352 } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
1353 struct dev_mc_list *mclist;
1356 memset(hash_table, 0, sizeof(hash_table));
1357 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1358 i++, mclist = mclist->next) {
1359 /* Due to a bug in the early chip versions, multiple filter
1360 slots must be set for each address. */
1361 if (yp->drv_flags & HasMulticastBug) {
/* Hash over 3/4/5-byte prefixes so all buggy-slot aliases are set. */
1362 set_bit((ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f,
1364 set_bit((ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f,
1366 set_bit((ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f,
/* Normal chips need only the full 6-byte CRC slot. */
1369 set_bit((ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f,
1372 /* Copy the hash table to the chip. */
1373 for (i = 0; i < 4; i++)
1374 outw(hash_table[i], ioaddr + HashTbl + i*2);
1375 outw(0x0003, ioaddr + AddrMode);
1376 } else { /* Normal, unicast/broadcast-only mode. */
1377 outw(0x0001, ioaddr + AddrMode);
1379 /* Restart the Rx process. */
1380 outw(cfg_value | 0x1000, ioaddr + Cnfg);
1383 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
/*
 * ethtool sub-ioctl handler: currently only ETHTOOL_GDRVINFO is
 * implemented, returning the driver name/version and PCI slot name.
 */
1385 struct yellowfin_private *np = dev->priv;
/* NOTE(review): "ðcmd" below is a mis-encoding of "&ethcmd"
   (HTML-entity corruption of the source) -- restore before compiling. */
1388 if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd)))
1392 case ETHTOOL_GDRVINFO: {
1393 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
1394 strcpy(info.driver, DRV_NAME);
1395 strcpy(info.version, DRV_VERSION);
1396 strcpy(info.bus_info, np->pci_dev->slot_name);
1397 if (copy_to_user(useraddr, &info, sizeof(info)))
1407 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
/*
 * Device ioctl dispatcher: forwards SIOCETHTOOL to the ethtool handler
 * and implements the MII register get/set ioctls (with their 2.4-era
 * SIOCDEVPRIVATE aliases).  Writes to the first PHY's control registers
 * are shadowed into np->medialock / full_duplex / advertising so the
 * driver's duplex logic stays in sync with user-forced settings.
 */
1409 struct yellowfin_private *np = dev->priv;
1410 long ioaddr = dev->base_addr;
1411 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
1415 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
1416 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1417 case SIOCDEVPRIVATE: /* for binary compat, remove in 2.5 */
1418 data->phy_id = np->phys[0] & 0x1f;
1421 case SIOCGMIIREG: /* Read MII PHY register. */
1422 case SIOCDEVPRIVATE+1: /* for binary compat, remove in 2.5 */
1423 data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
1426 case SIOCSMIIREG: /* Write MII PHY register. */
1427 case SIOCDEVPRIVATE+2: /* for binary compat, remove in 2.5 */
/* PHY register writes are privileged. */
1428 if (!capable(CAP_NET_ADMIN))
1430 if (data->phy_id == np->phys[0]) {
1431 u16 value = data->val_in;
1432 switch (data->reg_num) {
1434 /* Check for autonegotiation on or reset. */
/* 0x9000 = BMCR reset|autoneg-enable: either clears the media lock. */
1435 np->medialock = (value & 0x9000) ? 0 : 1;
1437 np->full_duplex = (value & 0x0100) ? 1 : 0;
1439 case 4: np->advertising = value; break;
1441 /* Perhaps check_duplex(dev), depending on chip semantics. */
1443 mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1451 static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
/*
 * PCI hot-unplug / driver-unload teardown: free the DMA-coherent
 * status block and both descriptor rings, unregister the net device,
 * release the PCI regions, unmap the I/O space and clear drvdata.
 */
1453 struct net_device *dev = pci_get_drvdata(pdev);
1454 struct yellowfin_private *np;
1460 pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
1462 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
1463 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
1464 unregister_netdev (dev);
1466 pci_release_regions (pdev);
1469 iounmap ((void *) dev->base_addr);
1473 pci_set_drvdata(pdev, NULL);
/* PCI driver descriptor: binds the probe/remove callbacks to the
   device-ID table (GNU C pre-C99 "label:" initializer syntax). */
1477 static struct pci_driver yellowfin_driver = {
1479 id_table: yellowfin_pci_tbl,
1480 probe: yellowfin_init_one,
1481 remove: __devexit_p(yellowfin_remove_one),
/* Module entry point: register the PCI driver with the core. */
1485 static int __init yellowfin_init (void)
1487 /* when a module, this is printed whether or not devices are found in probe */
1491 return pci_module_init (&yellowfin_driver);
/* Module exit point: unregister the PCI driver (triggers remove_one
   for every bound device). */
1495 static void __exit yellowfin_cleanup (void)
1497 pci_unregister_driver (&yellowfin_driver);
/* Hook the init/exit functions into the module loader. */
1501 module_init(yellowfin_init);
1502 module_exit(yellowfin_cleanup);
1506 * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c yellowfin.c"
1507 * compile-command-alphaLX: "gcc -DMODULE -Wall -Wstrict-prototypes -O2 -c yellowfin.c -fomit-frame-pointer -fno-strength-reduce -mno-fp-regs -Wa,-m21164a -DBWX_USABLE -DBWIO_ENABLED"
1508 * simple-compile-command: "gcc -DMODULE -O6 -c yellowfin.c"