1 /* winbond-840.c: A Linux PCI network adapter device driver. */
3 Written 1998-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
17 Support and updates available at
18 http://www.scyld.com/network/drivers.html
20 Do not remove the copyright information.
21 Do not change the version information unless an improvement has been made.
22 Merely removing my name, as Compex has done in the past, does not count
28 * spin lock update, memory barriers, new style dma mappings
29 limit each tx buffer to < 1024 bytes
30 remove DescIntr from Rx descriptors (that's a Tx flag)
31 remove next pointer from Tx descriptors
32 synchronize tx_q_bytes
33 software reset in tx_timeout
34 Copyright (C) 2000 Manfred Spraul
37 support for big endian descriptors
38 Copyright (C) 2001 Manfred Spraul
39 * ethtool support (jgarzik)
40 * Replace some MII-related magic numbers with constants (jgarzik)
43 * enable pci_power_off
47 #define DRV_NAME "winbond-840"
48 #define DRV_VERSION "1.01-d"
49 #define DRV_RELDATE "Nov-17-2001"
52 /* Automatically extracted configuration info:
53 probe-func: winbond840_probe
54 config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
56 c-help-name: Winbond W89c840 PCI Ethernet support
57 c-help-symbol: CONFIG_WINBOND_840
58 c-help: This driver is for the Winbond W89c840 chip. It also works with
59 c-help: the TX9882 chip on the Compex RL100-ATX board.
60 c-help: More specific information and updates are available from
61 c-help: http://www.scyld.com/network/drivers.html
64 /* The user-configurable values.
65 These may be modified when a driver module is loaded.*/
67 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
68 static int max_interrupt_work = 20;
69 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
70 The '840 uses a 64 element hash table based on the Ethernet CRC. */
71 static int multicast_filter_limit = 32;
73 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
74 Setting to > 1518 effectively disables this feature. */
75 static int rx_copybreak;
77 /* Used to pass the media type, etc.
78 Both 'options[]' and 'full_duplex[]' should exist for driver
80 The media type is usually passed in 'options[]'.
82 #define MAX_UNITS 8 /* More are supported, limit only on options */
83 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
84 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
86 /* Operational parameters that are set at compile time. */
88 /* Keep the ring sizes a power of two for compile efficiency.
89 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
90 Making the Tx ring too large decreases the effectiveness of channel
91 bonding and packet priority.
92 There are no ill effects from too-large receive rings. */
93 #define TX_RING_SIZE 16
94 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
95 #define TX_QUEUE_LEN_RESTART 5
96 #define RX_RING_SIZE 32
98 #define TX_BUFLIMIT (1024-128)
100 /* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
101 To avoid overflowing we don't queue again until we have room for a
104 #define TX_FIFO_SIZE (2048)
105 #define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
108 /* Operational parameters that usually are not changed. */
109 /* Time in jiffies before concluding the transmitter is hung. */
110 #define TX_TIMEOUT (2*HZ)
112 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
117 #if !defined(__OPTIMIZE__)
118 #warning You must compile this file with the correct options!
119 #warning See the last lines of the source file.
120 #error You must compile this driver with "-O".
123 /* Include files, designed to support most kernel versions 2.0.0 and later. */
124 #include <linux/module.h>
125 #include <linux/kernel.h>
126 #include <linux/string.h>
127 #include <linux/timer.h>
128 #include <linux/errno.h>
129 #include <linux/ioport.h>
130 #include <linux/slab.h>
131 #include <linux/interrupt.h>
132 #include <linux/pci.h>
133 #include <linux/netdevice.h>
134 #include <linux/etherdevice.h>
135 #include <linux/skbuff.h>
136 #include <linux/init.h>
137 #include <linux/delay.h>
138 #include <linux/ethtool.h>
139 #include <linux/mii.h>
140 #include <linux/rtnetlink.h>
141 #include <linux/crc32.h>
142 #include <asm/uaccess.h>
143 #include <asm/processor.h> /* Processor type for cache alignment. */
144 #include <asm/bitops.h>
148 /* These identify the driver base version and may not be removed. */
149 static char version[] __devinitdata =
150 KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
151 KERN_INFO " http://www.scyld.com/network/drivers.html\n";
153 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
154 MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
155 MODULE_LICENSE("GPL");
157 MODULE_PARM(max_interrupt_work, "i");
158 MODULE_PARM(debug, "i");
159 MODULE_PARM(rx_copybreak, "i");
160 MODULE_PARM(multicast_filter_limit, "i");
161 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
162 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
163 MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
164 MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
165 MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
166 MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
167 MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
168 MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
173 I. Board Compatibility
175 This driver is for the Winbond w89c840 chip.
177 II. Board-specific settings
181 III. Driver operation
183 This chip is very similar to the Digital 21*4* "Tulip" family. The first
184 twelve registers and the descriptor format are nearly identical. Read a
185 Tulip manual for operational details.
187 A significant difference is that the multicast filter and station address are
188 stored in registers rather than loaded through a pseudo-transmit packet.
190 Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
191 full-sized packet we must use both data buffers in a descriptor. Thus the
192 driver uses ring mode where descriptors are implicitly sequential in memory,
193 rather than using the second descriptor address as a chain pointer to
194 subsequent descriptors.
198 If you are going to almost clone a Tulip, why not go all the way and avoid
199 the need for a new driver?
203 http://www.scyld.com/expert/100mbps.html
204 http://www.scyld.com/expert/NWay.html
205 http://www.winbond.com.tw/
209 A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
210 correctly detect a full FIFO, and queuing more than 2048 bytes may result in
211 silent data corruption.
213 Test with 'ping -s 10000' on a fast computer.
222 enum pci_id_flags_bits {
223 /* Set PCI command register bits before calling probe1(). */
224 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
225 /* Read and map the single following PCI BAR. */
226 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
227 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
229 enum chip_capability_flags {
230 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,};
232 #define W840_FLAGS (PCI_USES_IO | PCI_ADDR0 | PCI_USES_MASTER)
234 #define W840_FLAGS (PCI_USES_MEM | PCI_ADDR1 | PCI_USES_MASTER)
237 static struct pci_device_id w840_pci_tbl[] __devinitdata = {
238 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
239 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
240 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
243 MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
248 int pci, pci_mask, subsystem, subsystem_mask;
249 int revision, revision_mask; /* Only 8 bits. */
251 enum pci_id_flags_bits pci_flags;
252 int io_size; /* Needed for I/O region check or ioremap(). */
253 int drv_flags; /* Driver use, intended as capability flags. */
255 static struct pci_id_info pci_id_tbl[] = {
256 {"Winbond W89c840", /* Sometime a Level-One switch card. */
257 { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
258 W840_FLAGS, 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
259 {"Winbond W89c840", { 0x08401050, 0xffffffff, },
260 W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
261 {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
262 W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
263 {0,}, /* 0 terminated list. */
266 /* This driver was written to use PCI memory space, however some x86 systems
267 work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space
268 accesses instead of memory space. */
285 /* Offsets to the Command and Status Registers, "CSRs".
286 While similar to the Tulip, these registers are longword aligned.
287 Note: It's not useful to define symbolic names for every register bit in
288 the device. The name can only partially document the semantics and make
289 the driver longer and more difficult to read.
292 PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
293 RxRingPtr=0x0C, TxRingPtr=0x10,
294 IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
295 RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
296 CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
297 MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
298 CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
301 /* Bits in the interrupt status/enable registers. */
302 /* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
303 enum intr_status_bits {
304 NormalIntr=0x10000, AbnormalIntr=0x8000,
305 IntrPCIErr=0x2000, TimerInt=0x800,
306 IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
307 TxFIFOUnderflow=0x20, RxErrIntr=0x10,
308 TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
311 /* Bits in the NetworkConfig register. */
313 AcceptErr=0x80, AcceptRunt=0x40,
314 AcceptBroadcast=0x20, AcceptMulticast=0x10,
315 AcceptAllPhys=0x08, AcceptMyPhys=0x02,
319 MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
320 MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
323 /* The Tulip Rx and Tx buffer descriptors. */
324 struct w840_rx_desc {
331 struct w840_tx_desc {
334 u32 buffer1, buffer2;
337 /* Bits in network_desc.status */
338 enum desc_status_bits {
339 DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
340 DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
344 #define PRIV_ALIGN 15 /* Required alignment mask */
345 #define MII_CNT 1 /* winbond only supports one MII */
346 struct netdev_private {
347 struct w840_rx_desc *rx_ring;
348 dma_addr_t rx_addr[RX_RING_SIZE];
349 struct w840_tx_desc *tx_ring;
350 dma_addr_t tx_addr[TX_RING_SIZE];
351 dma_addr_t ring_dma_addr;
352 /* The addresses of receive-in-place skbuffs. */
353 struct sk_buff* rx_skbuff[RX_RING_SIZE];
354 /* The saved address of a sent-in-place packet/buffer, for later free(). */
355 struct sk_buff* tx_skbuff[TX_RING_SIZE];
356 struct net_device_stats stats;
357 struct timer_list timer; /* Media monitoring timer. */
358 /* Frequently used values: keep some adjacent for cache effect. */
360 int chip_id, drv_flags;
361 struct pci_dev *pci_dev;
363 struct w840_rx_desc *rx_head_desc;
364 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
365 unsigned int rx_buf_sz; /* Based on MTU+slack. */
366 unsigned int cur_tx, dirty_tx;
367 unsigned int tx_q_bytes;
368 unsigned int tx_full; /* The Tx queue is full. */
369 /* MII transceiver section. */
370 int mii_cnt; /* MII device addresses. */
371 unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */
373 struct mii_if_info mii_if;
376 static int eeprom_read(long ioaddr, int location);
377 static int mdio_read(struct net_device *dev, int phy_id, int location);
378 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
379 static int netdev_open(struct net_device *dev);
380 static int update_link(struct net_device *dev);
381 static void netdev_timer(unsigned long data);
382 static void init_rxtx_rings(struct net_device *dev);
383 static void free_rxtx_rings(struct netdev_private *np);
384 static void init_registers(struct net_device *dev);
385 static void tx_timeout(struct net_device *dev);
386 static int alloc_ringdesc(struct net_device *dev);
387 static void free_ringdesc(struct netdev_private *np);
388 static int start_tx(struct sk_buff *skb, struct net_device *dev);
389 static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
390 static void netdev_error(struct net_device *dev, int intr_status);
391 static int netdev_rx(struct net_device *dev);
392 static u32 __set_rx_mode(struct net_device *dev);
393 static void set_rx_mode(struct net_device *dev);
394 static struct net_device_stats *get_stats(struct net_device *dev);
395 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
396 static int netdev_close(struct net_device *dev);
400 static int __devinit w840_probe1 (struct pci_dev *pdev,
401 const struct pci_device_id *ent)
403 struct net_device *dev;
404 struct netdev_private *np;
406 int chip_idx = ent->driver_data;
408 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
411 i = pci_enable_device(pdev);
414 pci_set_master(pdev);
418 if (pci_set_dma_mask(pdev,0xFFFFffff)) {
419 printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
423 dev = alloc_etherdev(sizeof(*np));
426 SET_MODULE_OWNER(dev);
428 if (pci_request_regions(pdev, DRV_NAME))
432 ioaddr = pci_resource_start(pdev, 0);
434 ioaddr = pci_resource_start(pdev, 1);
435 ioaddr = (long) ioremap (ioaddr, pci_id_tbl[chip_idx].io_size);
437 goto err_out_free_res;
440 for (i = 0; i < 3; i++)
441 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
443 /* Reset the chip to erase previous misconfiguration.
444 No hold time required! */
445 writel(0x00000001, ioaddr + PCIBusCfg);
447 dev->base_addr = ioaddr;
452 np->chip_id = chip_idx;
453 np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
454 spin_lock_init(&np->lock);
455 np->mii_if.dev = dev;
456 np->mii_if.mdio_read = mdio_read;
457 np->mii_if.mdio_write = mdio_write;
459 pci_set_drvdata(pdev, dev);
462 option = dev->mem_start;
464 /* The lower four bits are the media type. */
467 np->mii_if.full_duplex = 1;
469 printk(KERN_INFO "%s: ignoring user supplied media type %d",
470 dev->name, option & 15);
472 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
473 np->mii_if.full_duplex = 1;
475 if (np->mii_if.full_duplex)
476 np->mii_if.force_media = 1;
478 /* The chip-specific entries in the device structure. */
479 dev->open = &netdev_open;
480 dev->hard_start_xmit = &start_tx;
481 dev->stop = &netdev_close;
482 dev->get_stats = &get_stats;
483 dev->set_multicast_list = &set_rx_mode;
484 dev->do_ioctl = &netdev_ioctl;
485 dev->tx_timeout = &tx_timeout;
486 dev->watchdog_timeo = TX_TIMEOUT;
488 i = register_netdev(dev);
490 goto err_out_cleardev;
492 printk(KERN_INFO "%s: %s at 0x%lx, ",
493 dev->name, pci_id_tbl[chip_idx].name, ioaddr);
494 for (i = 0; i < 5; i++)
495 printk("%2.2x:", dev->dev_addr[i]);
496 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
498 if (np->drv_flags & CanHaveMII) {
499 int phy, phy_idx = 0;
500 for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
501 int mii_status = mdio_read(dev, phy, MII_BMSR);
502 if (mii_status != 0xffff && mii_status != 0x0000) {
503 np->phys[phy_idx++] = phy;
504 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
505 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
506 mdio_read(dev, phy, MII_PHYSID2);
507 printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
508 "0x%4.4x advertising %4.4x.\n",
509 dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
512 np->mii_cnt = phy_idx;
513 np->mii_if.phy_id = np->phys[0];
515 printk(KERN_WARNING "%s: MII PHY not found -- this device may "
516 "not operate correctly.\n", dev->name);
524 pci_set_drvdata(pdev, NULL);
526 iounmap((void *)ioaddr);
529 pci_release_regions(pdev);
536 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
537 often serial bit streams generated by the host processor.
538 The example below is for the common 93c46 EEPROM, 64 16 bit words. */
540 /* Delay between EEPROM clock transitions.
541 No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
542 a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
543 made udelay() unreliable.
544 The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
547 #define eeprom_delay(ee_addr) readl(ee_addr)
549 enum EEPROM_Ctrl_Bits {
550 EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
551 EE_ChipSelect=0x801, EE_DataIn=0x08,
554 /* The EEPROM commands include the always-set leading bit. */
556 EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
/*
 * eeprom_read() - bit-bang one 16-bit word out of the 93c46 serial EEPROM.
 * @addr: mapped base address of the chip registers.
 * @location: EEPROM word index to read.
 * Shifts the read command out MSB-first, then clocks 16 data bits back in.
 * NOTE(review): some source lines (declarations of i/retval, closing
 * braces, final return) are absent from this excerpt.
 */
559 static int eeprom_read(long addr, int location)
563 long ee_addr = addr + EECtrl;
564 int read_cmd = location | EE_ReadCmd;
565 writel(EE_ChipSelect, ee_addr);
567 /* Shift the read command bits out. */
568 for (i = 10; i >= 0; i--) {
569 short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
570 writel(dataval, ee_addr);
571 eeprom_delay(ee_addr);
572 writel(dataval | EE_ShiftClk, ee_addr);
573 eeprom_delay(ee_addr);
575 writel(EE_ChipSelect, ee_addr);
576 eeprom_delay(ee_addr);
/* Clock the 16 data bits in, MSB first. */
578 for (i = 16; i > 0; i--) {
579 writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
580 eeprom_delay(ee_addr);
581 retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
582 writel(EE_ChipSelect, ee_addr);
583 eeprom_delay(ee_addr);
586 /* Terminate the EEPROM access. */
591 /* MII transceiver control section.
592 Read and write the MII registers using software-generated serial
593 MDIO protocol. See the MII specifications or DP83840A data sheet
596 The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
597 met by back-to-back 33Mhz PCI cycles. */
598 #define mdio_delay(mdio_addr) readl(mdio_addr)
600 /* Set iff a MII transceiver on any interface requires mdio preamble.
601 This is only set with older transceivers, so the extra
602 code size of a per-interface flag is not worthwhile. */
603 static char mii_preamble_required = 1;
605 #define MDIO_WRITE0 (MDIO_EnbOutput)
606 #define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
608 /* Generate the preamble required for initial synchronization and
609 a few older transceivers. */
/*
 * mdio_sync() - clock out the MDIO preamble (a run of logic ones) to
 * synchronize the PHY's management interface before a command.
 * NOTE(review): the declaration of 'bits' and the closing braces are on
 * lines absent from this excerpt.
 */
610 static void mdio_sync(long mdio_addr)
614 /* Establish sync by sending at least 32 logic ones. */
615 while (--bits >= 0) {
616 writel(MDIO_WRITE1, mdio_addr);
617 mdio_delay(mdio_addr);
618 writel(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
619 mdio_delay(mdio_addr);
/*
 * mdio_read() - read one 16-bit MII management register via the
 * software-driven serial MDIO protocol.
 * @phy_id: PHY address on the bus; @location: register number.
 * Returns the 16-bit register value.
 */
623 static int mdio_read(struct net_device *dev, int phy_id, int location)
625 long mdio_addr = dev->base_addr + MIICtrl;
/* Frame: start/opcode bits, PHY address, register number. */
626 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
629 if (mii_preamble_required)
630 mdio_sync(mdio_addr);
632 /* Shift the read command bits out. */
633 for (i = 15; i >= 0; i--) {
634 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
636 writel(dataval, mdio_addr);
637 mdio_delay(mdio_addr);
638 writel(dataval | MDIO_ShiftClk, mdio_addr);
639 mdio_delay(mdio_addr);
641 /* Read the two transition, 16 data, and wire-idle bits. */
642 for (i = 20; i > 0; i--) {
643 writel(MDIO_EnbIn, mdio_addr);
644 mdio_delay(mdio_addr);
645 retval = (retval << 1) | ((readl(mdio_addr) & MDIO_DataIn) ? 1 : 0);
646 writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
647 mdio_delay(mdio_addr);
/* Drop the trailing idle bit and mask to 16 data bits. */
649 return (retval>>1) & 0xffff;
/*
 * mdio_write() - write one 16-bit MII management register via the
 * software-driven serial MDIO protocol.
 * Also mirrors writes to the advertising register (reg 4) of the first
 * PHY into np->mii_if.advertising so link logic stays in sync.
 */
652 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
654 struct netdev_private *np = dev->priv;
655 long mdio_addr = dev->base_addr + MIICtrl;
/* Frame: start/opcode bits, PHY address, register number, data. */
656 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
659 if (location == 4 && phy_id == np->phys[0])
660 np->mii_if.advertising = value;
662 if (mii_preamble_required)
663 mdio_sync(mdio_addr);
665 /* Shift the command bits out. */
666 for (i = 31; i >= 0; i--) {
667 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
669 writel(dataval, mdio_addr);
670 mdio_delay(mdio_addr);
671 writel(dataval | MDIO_ShiftClk, mdio_addr);
672 mdio_delay(mdio_addr);
674 /* Clear out extra bits. */
675 for (i = 2; i > 0; i--) {
676 writel(MDIO_EnbIn, mdio_addr);
677 mdio_delay(mdio_addr);
678 writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
679 mdio_delay(mdio_addr);
/*
 * netdev_open() - dev->open handler: reset the chip, grab the IRQ,
 * allocate/initialize the descriptor rings, program the registers and
 * start the media-monitoring timer.
 * Returns 0 on success or a negative errno from request_irq /
 * alloc_ringdesc (error paths partly outside this excerpt).
 */
685 static int netdev_open(struct net_device *dev)
687 struct netdev_private *np = dev->priv;
688 long ioaddr = dev->base_addr;
691 writel(0x00000001, ioaddr + PCIBusCfg); /* Reset */
/* Keep the stack from touching the device until setup is complete. */
693 netif_device_detach(dev);
694 i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
699 printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
700 dev->name, dev->irq);
702 if((i=alloc_ringdesc(dev)))
705 spin_lock_irq(&np->lock);
706 netif_device_attach(dev);
708 spin_unlock_irq(&np->lock);
710 netif_start_queue(dev);
712 printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
714 /* Set the timer to check for link beat. */
715 init_timer(&np->timer);
716 np->timer.expires = jiffies + 1*HZ;
717 np->timer.data = (unsigned long)dev;
718 np->timer.function = &netdev_timer; /* timer handler */
719 add_timer(&np->timer);
722 netif_device_attach(dev);
726 #define MII_DAVICOM_DM9101 0x0181b800
/*
 * update_link() - poll the PHY for link/speed/duplex and compute the
 * CSR6 media bits (0x20000000 = 100MBit, 0x200 = full duplex presumably;
 * verify against the datasheet) that the caller feeds to update_csr6().
 * Reads BMSR twice because the link-down bit latches until read.
 */
728 static int update_link(struct net_device *dev)
730 struct netdev_private *np = dev->priv;
731 int duplex, fasteth, result, mii_reg;
734 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
/* 0xffff means the PHY did not answer -- bail out. */
736 if (mii_reg == 0xffff)
738 /* reread: the link status bit is sticky */
739 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
740 if (!(mii_reg & 0x4)) {
741 if (netif_carrier_ok(dev)) {
743 printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
744 dev->name, np->phys[0]);
745 netif_carrier_off(dev);
749 if (!netif_carrier_ok(dev)) {
751 printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
752 dev->name, np->phys[0]);
753 netif_carrier_on(dev);
756 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
757 /* If the link partner doesn't support autonegotiation
758 * the MII detects its abilities with the "parallel detection".
759 * Some MIIs update the LPA register to the result of the parallel
760 * detection, some don't.
761 * The Davicom PHY [at least 0181b800] doesn't.
762 * Instead bit 9 and 13 of the BMCR are updated to the result
763 * of the negotiation..
765 mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
766 duplex = mii_reg & BMCR_FULLDPLX;
767 fasteth = mii_reg & BMCR_SPEED100;
/* Normal PHYs: intersect the partner's LPA with our advertisement. */
770 mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
771 negotiated = mii_reg & np->mii_if.advertising;
773 duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
774 fasteth = negotiated & 0x380;
776 duplex |= np->mii_if.force_media;
777 /* remove fastether and fullduplex */
778 result = np->csr6 & ~0x20000200;
782 result |= 0x20000000;
783 if (result != np->csr6 && debug)
784 printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
785 dev->name, fasteth ? 100 : 10,
786 duplex ? "full" : "half", np->phys[0]);
790 #define RXTX_TIMEOUT 2000
/*
 * update_csr6() - safely change the NetworkConfig (CSR6) register.
 * Stops the Tx and Rx processes, busy-waits (bounded by RXTX_TIMEOUT)
 * until the chip reports them idle via IntrStatus state fields, then
 * writes the new configuration and restarts.
 * Caller must hold np->lock (see netdev_timer).
 */
791 static inline void update_csr6(struct net_device *dev, int new)
793 struct netdev_private *np = dev->priv;
794 long ioaddr = dev->base_addr;
795 int limit = RXTX_TIMEOUT;
797 if (!netif_device_present(dev))
801 /* stop both Tx and Rx processes */
802 writel(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
803 /* wait until they have really stopped */
805 int csr5 = readl(ioaddr + IntrStatus);
/* Rx process state (bits 19:17) and Tx process state (bits 22:20). */
808 t = (csr5 >> 17) & 0x07;
811 t = (csr5 >> 20) & 0x07;
818 printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n",
825 /* and restart them with the new configuration */
826 writel(np->csr6, ioaddr + NetworkConfig);
828 np->mii_if.full_duplex = 1;
/*
 * netdev_timer() - periodic media-monitoring timer.
 * Re-checks the MII link state and reprograms CSR6 if it changed,
 * then rearms itself for 10 seconds later.
 * @data: the net_device pointer, cast to unsigned long by add_timer setup.
 */
831 static void netdev_timer(unsigned long data)
833 struct net_device *dev = (struct net_device *)data;
834 struct netdev_private *np = dev->priv;
835 long ioaddr = dev->base_addr;
838 printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
840 dev->name, (int)readl(ioaddr + IntrStatus),
841 (int)readl(ioaddr + NetworkConfig));
/* Serialize against the interrupt handler and ioctl paths. */
842 spin_lock_irq(&np->lock);
843 update_csr6(dev, update_link(dev));
844 spin_unlock_irq(&np->lock);
845 np->timer.expires = jiffies + 10*HZ;
846 add_timer(&np->timer);
/*
 * init_rxtx_rings() - set up the Rx and Tx descriptor rings.
 * The Tx ring lives in the same DMA allocation, immediately after the
 * RX_RING_SIZE Rx descriptors. Rx buffers are skbuffs mapped with
 * pci_map_single; allocation failure is tolerated (those slots stay
 * unowned). Finally the ring base addresses are written to the chip.
 */
849 static void init_rxtx_rings(struct net_device *dev)
851 struct netdev_private *np = dev->priv;
854 np->rx_head_desc = &np->rx_ring[0];
855 np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
857 /* Initialize all Rx descriptors. */
858 for (i = 0; i < RX_RING_SIZE; i++) {
859 np->rx_ring[i].length = np->rx_buf_sz;
860 np->rx_ring[i].status = 0;
861 np->rx_skbuff[i] = 0;
863 /* Mark the last entry as wrapping the ring. */
864 np->rx_ring[i-1].length |= DescEndRing;
866 /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
867 for (i = 0; i < RX_RING_SIZE; i++) {
868 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
869 np->rx_skbuff[i] = skb;
872 skb->dev = dev; /* Mark as being used by this device. */
873 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->tail,
874 skb->len,PCI_DMA_FROMDEVICE);
876 np->rx_ring[i].buffer1 = np->rx_addr[i];
/* Hand the descriptor to the hardware. */
877 np->rx_ring[i].status = DescOwn;
881 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
883 /* Initialize the Tx descriptors */
884 for (i = 0; i < TX_RING_SIZE; i++) {
885 np->tx_skbuff[i] = 0;
886 np->tx_ring[i].status = 0;
889 np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
/* Tell the chip where both rings live in bus-address space. */
891 writel(np->ring_dma_addr, dev->base_addr + RxRingPtr);
892 writel(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
893 dev->base_addr + TxRingPtr);
/*
 * free_rxtx_rings() - release every skbuff still held by the Rx and Tx
 * rings, unmapping its DMA buffer first, and clear the slot pointers.
 * The descriptor memory itself is freed separately by free_ringdesc().
 */
897 static void free_rxtx_rings(struct netdev_private* np)
900 /* Free all the skbuffs in the Rx queue. */
901 for (i = 0; i < RX_RING_SIZE; i++) {
902 np->rx_ring[i].status = 0;
903 if (np->rx_skbuff[i]) {
904 pci_unmap_single(np->pci_dev,
906 np->rx_skbuff[i]->len,
908 dev_kfree_skb(np->rx_skbuff[i]);
910 np->rx_skbuff[i] = 0;
/* Do the same for any packets still queued for transmit. */
912 for (i = 0; i < TX_RING_SIZE; i++) {
913 if (np->tx_skbuff[i]) {
914 pci_unmap_single(np->pci_dev,
916 np->tx_skbuff[i]->len,
918 dev_kfree_skb(np->tx_skbuff[i]);
920 np->tx_skbuff[i] = 0;
/*
 * init_registers() - program the chip after reset: station address,
 * PCI bus/burst configuration (architecture dependent), CSR6 media and
 * Tx/Rx enable bits, and the interrupt mask.
 */
924 static void init_registers(struct net_device *dev)
926 struct netdev_private *np = dev->priv;
927 long ioaddr = dev->base_addr;
/* The station address is written one byte at a time. */
930 for (i = 0; i < 6; i++)
931 writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
933 /* Initialize other registers. */
935 i = (1<<20); /* Big-endian descriptors */
939 i |= (0x04<<2); /* skip length 4 u32 */
940 i |= 0x02; /* give Rx priority */
942 /* Configure the PCI bus bursts and FIFO thresholds.
943 486: Set 8 longword cache alignment, 8 longword burst.
944 586: Set 16 longword cache alignment, no burst limit.
945 Cache alignment bits 15:14 Burst length 13:8
946 0000 <not allowed> 0000 align to cache 0800 8 longwords
947 4000 8 longwords 0100 1 longword 1000 16 longwords
948 8000 16 longwords 0200 2 longwords 2000 32 longwords
949 C000 32 longwords 0400 4 longwords */
951 #if defined (__i386__) && !defined(MODULE)
952 /* When not a module we can work around broken '486 PCI boards. */
953 if (boot_cpu_data.x86 <= 4) {
955 printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
956 "alignment to 8 longwords.\n", dev->name);
960 #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
962 #elif defined(__sparc__)
965 #warning Processor architecture undefined
968 writel(i, ioaddr + PCIBusCfg);
971 /* 128 byte Tx threshold;
972 Transmit on; Receive on; */
973 update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
975 /* Clear and Enable interrupts by setting the interrupt mask. */
976 writel(0x1A0F5, ioaddr + IntrStatus);
977 writel(0x1A0F5, ioaddr + IntrEnable);
/* Kick the Rx process so it starts fetching descriptors. */
979 writel(0, ioaddr + RxStartDemand);
/*
 * tx_timeout() - dev->tx_timeout watchdog handler.
 * Dumps the ring state for debugging, then performs a full software
 * reset and ring reinitialization: under high load dirty_tx and the
 * chip's internal descriptor pointer can fall out of sync, so a reset
 * is the only reliable recovery.
 */
982 static void tx_timeout(struct net_device *dev)
984 struct netdev_private *np = dev->priv;
985 long ioaddr = dev->base_addr;
987 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
988 " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
992 printk(KERN_DEBUG "  Rx ring %p: ", np->rx_ring);
993 for (i = 0; i < RX_RING_SIZE; i++)
994 printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
995 printk("\n"KERN_DEBUG"  Tx ring %p: ", np->tx_ring);
996 for (i = 0; i < TX_RING_SIZE; i++)
997 printk(" %8.8x", np->tx_ring[i].status);
1000 printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
1001 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
1002 printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",readl(ioaddr+0x4C));
/* Block the interrupt handler while we reset and rebuild the rings. */
1004 disable_irq(dev->irq);
1005 spin_lock_irq(&np->lock);
1007 * Under high load dirty_tx and the internal tx descriptor pointer
1008 * come out of sync, thus perform a software reset and reinitialize
1012 writel(1, dev->base_addr+PCIBusCfg);
1015 free_rxtx_rings(np);
1016 init_rxtx_rings(dev);
1017 init_registers(dev);
1018 spin_unlock_irq(&np->lock);
1019 enable_irq(dev->irq);
1021 netif_wake_queue(dev);
1022 dev->trans_start = jiffies;
1023 np->stats.tx_errors++;
1027 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/*
 * alloc_ringdesc() - allocate one coherent DMA region holding both the
 * Rx and Tx descriptor arrays, then populate it via init_rxtx_rings().
 * Returns 0 on success (failure path not visible in this excerpt).
 */
1028 static int alloc_ringdesc(struct net_device *dev)
1030 struct netdev_private *np = dev->priv;
/* Standard buffer for MTU <= 1500; otherwise MTU plus header slack. */
1032 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1034 np->rx_ring = pci_alloc_consistent(np->pci_dev,
1035 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
1036 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
1037 &np->ring_dma_addr);
1040 init_rxtx_rings(dev);
/*
 * free_ringdesc() - release the coherent DMA region allocated by
 * alloc_ringdesc() (both descriptor arrays share one allocation).
 */
1044 static void free_ringdesc(struct netdev_private *np)
1046 pci_free_consistent(np->pci_dev,
1047 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
1048 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
1049 np->rx_ring, np->ring_dma_addr);
/*
 * start_tx() - dev->hard_start_xmit handler: queue one skb for transmit.
 * Because each Tx buffer is limited to TX_BUFLIMIT bytes, packets longer
 * than that are split across the descriptor's two buffer pointers.
 * Ownership (DescOwn) is set last, under np->lock, so the interrupt
 * handler never sees a half-built descriptor.
 */
1053 static int start_tx(struct sk_buff *skb, struct net_device *dev)
1055 struct netdev_private *np = dev->priv;
1058 /* Caution: the write order is important here, set the field
1059 with the "ownership" bits last. */
1061 /* Calculate the next Tx descriptor entry. */
1062 entry = np->cur_tx % TX_RING_SIZE;
1064 np->tx_addr[entry] = pci_map_single(np->pci_dev,
1065 skb->data,skb->len, PCI_DMA_TODEVICE);
1066 np->tx_skbuff[entry] = skb;
1068 np->tx_ring[entry].buffer1 = np->tx_addr[entry];
1069 if (skb->len < TX_BUFLIMIT) {
1070 np->tx_ring[entry].length = DescWholePkt | skb->len;
/* Oversized packet: second buffer carries the remainder (len<<11). */
1072 int len = skb->len - TX_BUFLIMIT;
1074 np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
1075 np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
1077 if(entry == TX_RING_SIZE-1)
1078 np->tx_ring[entry].length |= DescEndRing;
1080 /* Now acquire the irq spinlock.
1081 * The difficult race is the ordering between
1082 * increasing np->cur_tx and setting DescOwn:
1083 * - if np->cur_tx is increased first the interrupt
1084 * handler could consider the packet as transmitted
1085 * since DescOwn is cleared.
1086 * - If DescOwn is set first the NIC could report the
1087 * packet as sent, but the interrupt handler would ignore it
1088 * since the np->cur_tx was not yet increased.
1090 spin_lock_irq(&np->lock);
1093 wmb(); /* flush length, buffer1, buffer2 */
1094 np->tx_ring[entry].status = DescOwn;
1095 wmb(); /* flush status and kick the hardware */
1096 writel(0, dev->base_addr + TxStartDemand);
1097 np->tx_q_bytes += skb->len;
1098 /* Work around horrible bug in the chip by marking the queue as full
1099 when we do not have FIFO room for a maximum sized packet. */
1100 if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
1101 ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
1102 netif_stop_queue(dev);
1106 spin_unlock_irq(&np->lock);
1108 dev->trans_start = jiffies;
1111 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1112 dev->name, np->cur_tx, entry);
/*
 * netdev_tx_done - reclaim completed Tx descriptors (called under np->lock
 * from the interrupt handler). Walks dirty_tx..cur_tx, records error or
 * success statistics from the descriptor status bits, unmaps and frees
 * each transmitted skb, and wakes the queue when there is room again.
 */
1117 static void netdev_tx_done(struct net_device *dev)
1119 	struct netdev_private *np = dev->priv;
1120 	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1121 		int entry = np->dirty_tx % TX_RING_SIZE;
1122 		int tx_status = np->tx_ring[entry].status;
1126 		if (tx_status & 0x8000) { 	/* There was an error, log it. */
1127 #ifndef final_version
1129 				printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1130 					   dev->name, tx_status);
1132 			np->stats.tx_errors++;
/* Decode individual error bits into the standard net_device_stats
   counters. Bit meanings are chip-specific magic numbers. */
1133 			if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
1134 			if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
1135 			if (tx_status & 0x0200) np->stats.tx_window_errors++;
1136 			if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
1137 			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
1138 				np->stats.tx_heartbeat_errors++;
1140 #ifndef final_version
1142 				printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
1143 					   dev->name, entry, tx_status);
1145 			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
/* Collision count lives in status bits 3..6. */
1146 			np->stats.collisions += (tx_status >> 3) & 15;
1147 			np->stats.tx_packets++;
1149 		/* Free the original skb. */
1150 		pci_unmap_single(np->pci_dev,np->tx_addr[entry],
1151 					np->tx_skbuff[entry]->len,
1153 		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
1154 		dev_kfree_skb_irq(np->tx_skbuff[entry]);
1155 		np->tx_skbuff[entry] = 0;
/* Restart the queue only when both the descriptor count and the
   FIFO byte budget (chip bug workaround in start_tx) allow it. */
1158 		np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
1159 		np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
1160 		/* The ring is no longer full, clear tbusy. */
1163 		netif_wake_queue(dev);
1167 /* The interrupt handler does all of the Rx thread work and cleans up
1168 after the Tx thread. */
/*
 * intr_handler - top-level interrupt service routine.
 * Acknowledges pending interrupt sources, dispatches Rx work to
 * netdev_rx(), Tx completion to netdev_tx_done() (under np->lock),
 * and abnormal events to netdev_error(). If work_limit is exhausted
 * it throttles itself by masking down to AbnormalIntr|TimerInt and
 * arming the general-purpose timer to re-enable interrupts later.
 */
1169 static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1171 	struct net_device *dev = (struct net_device *)dev_instance;
1172 	struct netdev_private *np = dev->priv;
1173 	long ioaddr = dev->base_addr;
1174 	int work_limit = max_interrupt_work;
/* Hardware may be detached (suspend/close); never touch it then. */
1176 	if (!netif_device_present(dev))
1179 		u32 intr_status = readl(ioaddr + IntrStatus);
1181 		/* Acknowledge all of the current interrupt sources ASAP. */
1182 		writel(intr_status & 0x001ffff, ioaddr + IntrStatus);
1185 			printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1186 				   dev->name, intr_status);
/* Nothing of interest pending: done. */
1188 		if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
1191 		if (intr_status & (IntrRxDone | RxNoBuf))
/* Rx ring ran dry: poke the receiver to re-fetch descriptors. */
1193 		if (intr_status & RxNoBuf)
1194 			writel(0, ioaddr + RxStartDemand);
1196 		if (intr_status & (TxIdle | IntrTxDone) &&
1197 			np->cur_tx != np->dirty_tx) {
1198 			spin_lock(&np->lock);
1199 			netdev_tx_done(dev);
1200 			spin_unlock(&np->lock);
1203 		/* Abnormal error summary/uncommon events handlers. */
1204 		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
1205 						   TimerInt | IntrTxStopped))
1206 			netdev_error(dev, intr_status);
1208 		if (--work_limit < 0) {
1209 			printk(KERN_WARNING "%s: Too much work at interrupt, "
1210 				   "status=0x%4.4x.\n", dev->name, intr_status);
1211 			/* Set the timer to re-enable the other interrupts after
1213 			spin_lock(&np->lock);
1214 			if (netif_device_present(dev)) {
1215 				writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
1216 				writel(10, ioaddr + GPTimer);
1218 			spin_unlock(&np->lock);
1224 		printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1225 			   dev->name, (int)readl(ioaddr + IntrStatus));
1228 /* This routine is logically part of the interrupt handler, but separated
1229 for clarity and better register allocation. */
/*
 * netdev_rx - receive loop: drain completed Rx descriptors, hand good
 * packets to the network stack, record error statistics, then refill
 * the ring with fresh skbuffs and return ownership to the NIC.
 * Called from the interrupt handler; bounded by work_limit.
 */
1230 static int netdev_rx(struct net_device *dev)
1232 	struct netdev_private *np = dev->priv;
1233 	int entry = np->cur_rx % RX_RING_SIZE;
/* At most one full ring's worth of work per call. */
1234 	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1237 		printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
1238 			   entry, np->rx_ring[entry].status);
1241 	/* If EOP is set on the next entry, it's a new packet. Send it up. */
1242 	while (--work_limit >= 0) {
1243 		struct w840_rx_desc *desc = np->rx_head_desc;
1244 		s32 status = desc->status;
1247 			printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",
/* 0x0300 = first+last descriptor bits: anything else means an error
   or a packet spanning multiple buffers. Magic masks are chip-specific. */
1251 		if ((status & 0x38008300) != 0x0300) {
1252 			if ((status & 0x38000300) != 0x0300) {
1253 				/* Ignore earlier buffers. */
1254 				if ((status & 0xffff) != 0x7fff) {
1255 					printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1256 						   "multiple buffers, entry %#x status %4.4x!\n",
1257 						   dev->name, np->cur_rx, status);
1258 					np->stats.rx_length_errors++;
1260 			} else if (status & 0x8000) {
1261 				/* There was a fatal error. */
1263 					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
1265 				np->stats.rx_errors++; /* end of a packet.*/
1266 				if (status & 0x0890) np->stats.rx_length_errors++;
1267 				if (status & 0x004C) np->stats.rx_frame_errors++;
1268 				if (status & 0x0002) np->stats.rx_crc_errors++;
1271 			struct sk_buff *skb;
1272 			/* Omit the four octet CRC from the length. */
1273 			int pkt_len = ((status >> 16) & 0x7ff) - 4;
1275 #ifndef final_version
1277 				printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"
1278 					   " status %x.\n", pkt_len, status);
1280 			/* Check if the packet is long enough to accept without copying
1281 			   to a minimally-sized skbuff. */
1282 			if (pkt_len < rx_copybreak
1283 				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1285 				skb_reserve(skb, 2);	/* 16 byte align the IP header */
/* Small packet: copy into a fresh skb and leave the ring buffer
   mapped for reuse; sync the DMA buffer for CPU access first. */
1286 				pci_dma_sync_single(np->pci_dev,np->rx_addr[entry],
1287 							np->rx_skbuff[entry]->len,
1288 							PCI_DMA_FROMDEVICE);
1289 				/* Call copy + cksum if available. */
1291 				eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
1292 				skb_put(skb, pkt_len);
1294 				memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
/* Large packet: detach the ring skb itself and pass it up;
   the refill loop below will allocate a replacement. */
1298 				pci_unmap_single(np->pci_dev,np->rx_addr[entry],
1299 							np->rx_skbuff[entry]->len,
1300 							PCI_DMA_FROMDEVICE);
1301 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
1302 				np->rx_skbuff[entry] = NULL;
1304 #ifndef final_version				/* Remove after testing. */
1305 			/* You will want this info for the initial debug. */
1307 				printk(KERN_DEBUG "  Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
1308 					   "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
1310 					   skb->data[0], skb->data[1], skb->data[2], skb->data[3],
1311 					   skb->data[4], skb->data[5], skb->data[6], skb->data[7],
1312 					   skb->data[8], skb->data[9], skb->data[10],
1313 					   skb->data[11], skb->data[12], skb->data[13],
1314 					   skb->data[14], skb->data[15], skb->data[16],
1317 			skb->protocol = eth_type_trans(skb, dev);
1319 			dev->last_rx = jiffies;
1320 			np->stats.rx_packets++;
1321 			np->stats.rx_bytes += pkt_len;
1323 		entry = (++np->cur_rx) % RX_RING_SIZE;
1324 		np->rx_head_desc = &np->rx_ring[entry];
1327 	/* Refill the Rx ring buffers. */
1328 	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1329 		struct sk_buff *skb;
1330 		entry = np->dirty_rx % RX_RING_SIZE;
1331 		if (np->rx_skbuff[entry] == NULL) {
1332 			skb = dev_alloc_skb(np->rx_buf_sz);
1333 			np->rx_skbuff[entry] = skb;
1335 				break;			/* Better luck next round. */
1336 			skb->dev = dev;			/* Mark as being used by this device. */
1337 			np->rx_addr[entry] = pci_map_single(np->pci_dev,
1339 							skb->len, PCI_DMA_FROMDEVICE);
1340 			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
/* Give the descriptor back to the NIC. */
1343 		np->rx_ring[entry].status = DescOwn;
/*
 * netdev_error - handle abnormal interrupt events: Tx FIFO underflow
 * (raise the Tx threshold in csr6), missed Rx frames, and the GP-timer
 * interrupt used to re-enable the full interrupt mask after an
 * interrupt storm. Takes np->lock around the hardware updates.
 */
1349 static void netdev_error(struct net_device *dev, int intr_status)
1351 	long ioaddr = dev->base_addr;
1352 	struct netdev_private *np = dev->priv;
1355 		printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
1356 			   dev->name, intr_status);
/* All-ones status means the chip is gone (e.g. hot-unplug); bail out. */
1357 	if (intr_status == 0xffffffff)
1359 	spin_lock(&np->lock);
1360 	if (intr_status & TxFIFOUnderflow) {
1362 		/* Bump up the Tx threshold */
1364 		/* This causes lots of dropped packets,
1365 		 * and under high load even tx_timeouts
1367 		new = np->csr6 + 0x4000;
/* Extract the current threshold field (bits 14..20 of csr6). */
1369 		new = (np->csr6 >> 14)&0x7f;
1373 			new = 127; /* load full packet before starting */
1374 		new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1376 			printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n",
1378 		update_csr6(dev, new);
1380 	if (intr_status & IntrRxDied) {		/* Missed a Rx frame. */
1381 		np->stats.rx_errors++;
1383 	if (intr_status & TimerInt) {
1384 		/* Re-enable other interrupts. */
1385 		if (netif_device_present(dev))
1386 			writel(0x1A0F5, ioaddr + IntrEnable);
/* Fold in hardware-counted missed frames and restart the receiver. */
1388 	np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
1389 	writel(0, ioaddr + RxStartDemand);
1390 	spin_unlock(&np->lock);
/*
 * get_stats - return the accumulated statistics, first folding in the
 * chip's RxMissed counter (only when the device is running and present,
 * so we never touch detached hardware).
 */
1393 static struct net_device_stats *get_stats(struct net_device *dev)
1395 	long ioaddr = dev->base_addr;
1396 	struct netdev_private *np = dev->priv;
1398 	/* The chip only need report frames silently dropped. */
1399 	spin_lock_irq(&np->lock);
1400 	if (netif_running(dev) && netif_device_present(dev))
1401 		np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
1402 	spin_unlock_irq(&np->lock);
/*
 * __set_rx_mode - program the multicast hash filter registers and
 * compute the rx_mode bits for csr6. Three cases: promiscuous
 * (accept everything), too many/all multicast (all-ones filter),
 * or a CRC-based hash of each multicast address. Returns the
 * rx_mode bits; the caller folds them into csr6.
 */
1408 static u32 __set_rx_mode(struct net_device *dev)
1410 	long ioaddr = dev->base_addr;
1411 	u32 mc_filter[2];			/* Multicast hash filter */
1414 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
1415 		/* Unconditionally log net taps. */
1416 		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1417 		memset(mc_filter, 0xff, sizeof(mc_filter));
1418 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
1420 	} else if ((dev->mc_count > multicast_filter_limit)
1421 			   ||  (dev->flags & IFF_ALLMULTI)) {
1422 		/* Too many to match, or accept all multicasts. */
1423 		memset(mc_filter, 0xff, sizeof(mc_filter));
1424 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1426 		struct dev_mc_list *mclist;
1428 		memset(mc_filter, 0, sizeof(mc_filter));
/* Hash each multicast address: top 6 bits of the Ethernet CRC
   (inverted) index a bit in the 64-bit filter. */
1429 		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1430 			 i++, mclist = mclist->next) {
1431 			set_bit((ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F,
1434 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1436 	writel(mc_filter[0], ioaddr + MulticastFilter0);
1437 	writel(mc_filter[1], ioaddr + MulticastFilter1);
/*
 * set_rx_mode - net_device hook: recompute the filter via __set_rx_mode()
 * and merge the resulting mode bits into csr6 under the irq spinlock.
 */
1441 static void set_rx_mode(struct net_device *dev)
1443 	struct netdev_private *np = dev->priv;
1444 	u32 rx_mode = __set_rx_mode(dev);
1445 	spin_lock_irq(&np->lock);
1446 	update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1447 	spin_unlock_irq(&np->lock);
/*
 * netdev_ethtool_ioctl - dispatch ETHTOOL_* sub-commands from user space.
 * Copies the command word from useraddr, then handles driver info,
 * get/set settings (via the generic MII helpers, under np->lock),
 * autonegotiation restart, link status, and message level.
 *
 * FIX(review): "ðcmd" was mojibake for "&ethcmd" (an HTML-entity
 * corruption of "&eth"); restored — the sizeof(ethcmd) on the same
 * line confirms the intended identifier.
 * NOTE(review): several lines (returns, breaks, braces) are elided
 * in this excerpt of the file.
 */
1450 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
1452 	struct netdev_private *np = dev->priv;
1455 	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
1459 	case ETHTOOL_GDRVINFO: {
1460 		struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
1461 		strcpy(info.driver, DRV_NAME);
1462 		strcpy(info.version, DRV_VERSION);
1463 		strcpy(info.bus_info, np->pci_dev->slot_name);
1464 		if (copy_to_user(useraddr, &info, sizeof(info)))
1470 	case ETHTOOL_GSET: {
1471 		struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1472 		spin_lock_irq(&np->lock);
1473 		mii_ethtool_gset(&np->mii_if, &ecmd);
1474 		spin_unlock_irq(&np->lock);
1475 		if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
1480 	case ETHTOOL_SSET: {
1482 		struct ethtool_cmd ecmd;
1483 		if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
1485 		spin_lock_irq(&np->lock);
1486 		r = mii_ethtool_sset(&np->mii_if, &ecmd);
1487 		spin_unlock_irq(&np->lock);
1490 	/* restart autonegotiation */
1491 	case ETHTOOL_NWAY_RST: {
1492 		return mii_nway_restart(&np->mii_if);
1494 	/* get link status */
1495 	case ETHTOOL_GLINK: {
1496 		struct ethtool_value edata = {ETHTOOL_GLINK};
1497 		edata.data = mii_link_ok(&np->mii_if);
1498 		if (copy_to_user(useraddr, &edata, sizeof(edata)))
1503 	/* get message-level */
1504 	case ETHTOOL_GMSGLVL: {
1505 		struct ethtool_value edata = {ETHTOOL_GMSGLVL};
1507 		if (copy_to_user(useraddr, &edata, sizeof(edata)))
1511 	/* set message-level */
1512 	case ETHTOOL_SMSGLVL: {
1513 		struct ethtool_value edata;
1514 		if (copy_from_user(&edata, useraddr, sizeof(edata)))
/*
 * netdev_ioctl - device ioctl entry point: routes SIOCETHTOOL to
 * netdev_ethtool_ioctl() and handles the MII register get/set ioctls
 * (plus their SIOCDEVPRIVATE binary-compat aliases). MII register
 * writes require CAP_NET_ADMIN; register access is serialized with
 * the irq spinlock.
 */
1524 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1526 	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
1527 	struct netdev_private *np = dev->priv;
1531 		return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
1532 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
1533 	case SIOCDEVPRIVATE:		/* for binary compat, remove in 2.5 */
1534 		data->phy_id = ((struct netdev_private *)dev->priv)->phys[0] & 0x1f;
1537 	case SIOCGMIIREG:		/* Read MII PHY register. */
1538 	case SIOCDEVPRIVATE+1:		/* for binary compat, remove in 2.5 */
1539 		spin_lock_irq(&np->lock);
1540 		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
1541 		spin_unlock_irq(&np->lock);
1544 	case SIOCSMIIREG:		/* Write MII PHY register. */
1545 	case SIOCDEVPRIVATE+2:		/* for binary compat, remove in 2.5 */
1546 		if (!capable(CAP_NET_ADMIN))
1548 		spin_lock_irq(&np->lock);
1549 		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1550 		spin_unlock_irq(&np->lock);
/*
 * netdev_close - stop the interface: halt the queue, detach the device
 * and disable Tx/Rx and interrupts under the spinlock (detach prevents
 * other paths from touching dead hardware), free the irq, reattach,
 * collect final missed-frame stats, optionally dump the rings for
 * debugging, then kill the timer and free the rings.
 */
1557 static int netdev_close(struct net_device *dev)
1559 	long ioaddr = dev->base_addr;
1560 	struct netdev_private *np = dev->priv;
1562 	netif_stop_queue(dev);
1565 		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
1566 			   "Config %8.8x.\n", dev->name, (int)readl(ioaddr + IntrStatus),
1567 			   (int)readl(ioaddr + NetworkConfig));
1568 		printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
1569 			   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1572  	/* Stop the chip's Tx and Rx processes. */
1573 	spin_lock_irq(&np->lock);
1574 	netif_device_detach(dev);
1575 	update_csr6(dev, 0);
1576 	writel(0x0000, ioaddr + IntrEnable);
1577 	spin_unlock_irq(&np->lock);
1579 	free_irq(dev->irq, dev);
1581 	netif_device_attach(dev);
/* 0xffffffff read-back would mean the chip is gone; skip stats then. */
1583 	if (readl(ioaddr + NetworkConfig) != 0xffffffff)
1584 		np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
/* Debug-only dump of both descriptor rings. */
1590 		printk("\n"KERN_DEBUG"  Tx ring at %8.8x:\n",
1592 		for (i = 0; i < TX_RING_SIZE; i++)
1593 			printk(" #%d desc. %4.4x %4.4x %8.8x.\n",
1594 				   i, np->tx_ring[i].length,
1595 				   np->tx_ring[i].status, np->tx_ring[i].buffer1);
1596 		printk("\n"KERN_DEBUG "  Rx ring %8.8x:\n",
1598 		for (i = 0; i < RX_RING_SIZE; i++) {
1599 			printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1600 				   i, np->rx_ring[i].length,
1601 				   np->rx_ring[i].status, np->rx_ring[i].buffer1);
1604 #endif /* __i386__ debugging only */
1606 	del_timer_sync(&np->timer);
1608 	free_rxtx_rings(np);
/*
 * w840_remove1 - PCI removal callback: unregister the net device,
 * release the PCI regions, unmap the register window, and clear the
 * PCI driver data pointer.
 */
1614 static void __devexit w840_remove1 (struct pci_dev *pdev)
1616 	struct net_device *dev = pci_get_drvdata(pdev);
1618 	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
1620 		unregister_netdev(dev);
1621 		pci_release_regions(pdev);
1623 		iounmap((char *)(dev->base_addr));
1628 	pci_set_drvdata(pdev, NULL);
1634 * suspend/resume synchronization:
1635 * - open, close, do_ioctl:
1636 * rtnl_lock, & netif_device_detach after the rtnl_unlock.
1638 * spin_lock_irq(np->lock), doesn't touch hw if not present
1639 * - hard_start_xmit:
1640 * netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
1642 * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
1643 * - set_multicast_list
1644 * netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
1645 * - interrupt handler
1646 * doesn't touch hw if not present, synchronize_irq waits for
1647 * running instances of the interrupt handler.
1649 * Disabling hw requires clearing csr6 & IntrEnable.
1650 * update_csr6 & all function that write IntrEnable check netif_device_present
1651 * before settings any bits.
1653 * Detach must occur under spin_unlock_irq(), interrupts from a detached
1654 * device would cause an irq storm.
/*
 * w840_suspend - PCI suspend callback. If the interface is running:
 * kill the timer, then under np->lock detach the device, clear csr6
 * and IntrEnable, and stop the queue; wait for a concurrent xmit to
 * drain; fold in missed-frame stats; free the rings. The BUG() checks
 * assert that no path re-enabled the hardware after detach (see the
 * synchronization rules in the comment block above this function).
 */
1656 static int w840_suspend (struct pci_dev *pdev, u32 state)
1658 	struct net_device *dev = pci_get_drvdata (pdev);
1659 	struct netdev_private *np = dev->priv;
1660 	long ioaddr = dev->base_addr;
1663 	if (netif_running (dev)) {
1664 		del_timer_sync(&np->timer);
1666 		spin_lock_irq(&np->lock);
1667 		netif_device_detach(dev);
1668 		update_csr6(dev, 0);
1669 		writel(0, ioaddr + IntrEnable);
1670 		netif_stop_queue(dev);
1671 		spin_unlock_irq(&np->lock);
/* Wait for any in-flight hard_start_xmit to finish. */
1673 		spin_unlock_wait(&dev->xmit_lock);
1676 		np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
1678 		/* no more hardware accesses behind this line. */
1680 		if (np->csr6) BUG();
1681 		if (readl(ioaddr + IntrEnable)) BUG();
1683 		/* pci_power_off(pdev, -1); */
1685 		free_rxtx_rings(np);
/* Not running: just mark the device absent. */
1687 		netif_device_detach(dev);
/*
 * w840_resume - PCI resume callback: no-op if the device was never
 * suspended. Otherwise re-enable the PCI device, reset the chip via
 * PCIBusCfg (the readl flushes the posted write), reattach, rebuild
 * the rings and registers under np->lock, wake the queue, and restart
 * the periodic timer.
 */
1694 static int w840_resume (struct pci_dev *pdev)
1696 	struct net_device *dev = pci_get_drvdata (pdev);
1697 	struct netdev_private *np = dev->priv;
1700 	if (netif_device_present(dev))
1701 		goto out; /* device not suspended */
1702 	if (netif_running(dev)) {
1703 		pci_enable_device(pdev);
1704 	/*	pci_power_on(pdev); */
1706 		spin_lock_irq(&np->lock);
1707 		writel(1, dev->base_addr+PCIBusCfg);
1708 		readl(dev->base_addr+PCIBusCfg);
1710 		netif_device_attach(dev);
1711 		init_rxtx_rings(dev);
1712 		init_registers(dev);
1713 		spin_unlock_irq(&np->lock);
1715 		netif_wake_queue(dev);
1717 		np->timer.expires = jiffies + 1*HZ;
1718 		add_timer(&np->timer);
/* Interface down: just mark the device present again. */
1720 		netif_device_attach(dev);
/* PCI driver descriptor, registered from w840_init(). Uses the old
   GNU-style designated initializers (label: value) of the 2.4 era. */
1728 static struct pci_driver w840_driver = {
1730 	id_table:	w840_pci_tbl,
1732 	remove:		__devexit_p(w840_remove1),
1734 	suspend:	w840_suspend,
1735 	resume:		w840_resume,
/* Module init: register the PCI driver (pci_module_init returns an
   error code if no devices bind when built as a module). */
1739 static int __init w840_init(void)
1741 /* when a module, this is printed whether or not devices are found in probe */
1745 	return pci_module_init(&w840_driver);
/* Module exit: unregister the PCI driver, detaching all bound devices. */
1748 static void __exit w840_exit(void)
1750 	pci_unregister_driver(&w840_driver);
/* Hook the init/exit routines into the module loader. */
1753 module_init(w840_init);
1754 module_exit(w840_exit);