1 /* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
3 Written/copyright 1997-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
12 This driver is for the SMC83c170/175 "EPIC" series, as used on the
13 SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
20 Information and updates available at
21 http://www.scyld.com/network/epic100.html
23 ---------------------------------------------------------------------
25 Linux kernel-specific changes:
28 * Merge becker version 1.09 (4/08/2000)
31 * Major bugfix to 1.09 driver (Francis Romieu)
34 * Merge becker test version 1.09 (5/29/2000)
37 * Fix locking (jgarzik)
38 * Limit 83c175 probe to ethernet-class PCI devices (rgooch)
41 * Merge becker version 1.11
42 * Move pci_enable_device before any PCI BAR len checks
48 * ethtool driver info support (jgarzik)
51 * ethtool media get/set support (jgarzik)
54 * revert MII transceiver init change (jgarzik)
57 * implement ETHTOOL_[GS]SET, _NWAY_RST, _[GS]MSGLVL, _GLINK (jgarzik)
58 * replace some MII-related magic numbers with constants
61 * fix power-up sequence
64 * revert version 1.1.12, power-up sequence "fix"
66 	LK1.1.14 (Krzysztof Halasa):
67 * fix spurious bad initializations
68 * pound phy a la SMSC's app note on the subject
72 #define DRV_NAME "epic100"
73 #define DRV_VERSION "1.11+LK1.1.14"
74 #define DRV_RELDATE "Aug 4, 2002"
77 /* The user-configurable values.
78 These may be modified when a driver module is loaded.*/
80 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
81 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
82 static int max_interrupt_work = 32;
84 /* Used to pass the full-duplex flag, etc. */
85 #define MAX_UNITS 8 /* More are supported, limit only on options */
86 static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
87 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
89 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
90 Setting to > 1518 effectively disables this feature. */
91 static int rx_copybreak;
93 /* Operational parameters that are set at compile time. */
95 /* Keep the ring sizes a power of two for operational efficiency.
96 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
97 Making the Tx ring too large decreases the effectiveness of channel
98 bonding and packet priority.
99 There are no ill effects from too-large receive rings. */
100 #define TX_RING_SIZE 16
101 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
102 #define RX_RING_SIZE 32
103 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct epic_tx_desc)
104 #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct epic_rx_desc)
106 /* Operational parameters that usually are not changed. */
107 /* Time in jiffies before concluding the transmitter is hung. */
108 #define TX_TIMEOUT (2*HZ)
110 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
112 /* Bytes transferred to chip before transmission starts. */
113 /* Initial threshold, increased on underflow, rounded down to 4 byte units. */
114 #define TX_FIFO_THRESH 256
115 #define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */
117 #if !defined(__OPTIMIZE__)
118 #warning You must compile this file with the correct options!
119 #warning See the last lines of the source file.
120 #error You must compile this driver with "-O".
123 #include <linux/config.h>
124 #include <linux/module.h>
125 #include <linux/kernel.h>
126 #include <linux/string.h>
127 #include <linux/timer.h>
128 #include <linux/errno.h>
129 #include <linux/ioport.h>
130 #include <linux/slab.h>
131 #include <linux/interrupt.h>
132 #include <linux/pci.h>
133 #include <linux/delay.h>
134 #include <linux/netdevice.h>
135 #include <linux/etherdevice.h>
136 #include <linux/skbuff.h>
137 #include <linux/init.h>
138 #include <linux/spinlock.h>
139 #include <linux/ethtool.h>
140 #include <linux/mii.h>
141 #include <linux/crc32.h>
142 #include <asm/bitops.h>
144 #include <asm/uaccess.h>
146 /* These identify the driver base version and may not be removed. */
147 static char version[] __devinitdata =
148 DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>\n";
149 static char version2[] __devinitdata =
150 " http://www.scyld.com/network/epic100.html\n";
151 static char version3[] __devinitdata =
152 " (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
154 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
155 MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
156 MODULE_LICENSE("GPL");
158 MODULE_PARM(debug, "i");
159 MODULE_PARM(max_interrupt_work, "i");
160 MODULE_PARM(rx_copybreak, "i");
161 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
162 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
163 MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
164 MODULE_PARM_DESC(max_interrupt_work, "EPIC/100 maximum events handled per interrupt");
165 MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
166 MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
167 MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
172 I. Board Compatibility
174 This device driver is designed for the SMC "EPIC/100", the SMC
175 single-chip Ethernet controllers for PCI. This chip is used on
176 the SMC EtherPower II boards.
178 II. Board-specific settings
180 PCI bus devices are configured by the system at boot time, so no jumpers
181 need to be set on the board. The system BIOS will assign the
182 PCI INTA signal to a (preferably otherwise unused) system IRQ line.
183 Note: Kernel versions earlier than 1.3.73 do not support shared PCI
186 III. Driver operation
192 http://www.smsc.com/main/datasheets/83c171.pdf
193 http://www.smsc.com/main/datasheets/83c175.pdf
194 http://scyld.com/expert/NWay.html
195 http://www.national.com/pf/DP/DP83840A.html
202 enum pci_id_flags_bits {
203 /* Set PCI command register bits before calling probe1(). */
204 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
205 /* Read and map the single following PCI BAR. */
206 PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
207 PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
210 enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
212 #define EPIC_TOTAL_SIZE 0x100
215 #define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR0
217 #define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
227 struct epic_chip_info {
229 enum pci_id_flags_bits pci_flags;
230 int io_size; /* Needed for I/O region check or ioremap(). */
231 int drv_flags; /* Driver use, intended as capability flags. */
235 /* indexed by chip_t */
236 static struct epic_chip_info pci_id_tbl[] = {
237 { "SMSC EPIC/100 83c170",
238 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN },
239 { "SMSC EPIC/100 83c170",
240 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR },
241 { "SMSC EPIC/C 83c175",
242 EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN },
246 static struct pci_device_id epic_pci_tbl[] __devinitdata = {
247 { 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
248 { 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
249 { 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
250 PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
253 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
271 /* Offsets to registers, using the (ugh) SMC names. */
272 enum epic_registers {
273 COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
275 TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28, /* Rx error counters. */
276 MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
277 LAN0=64, /* MAC address. */
278 MC0=80, /* Multicast filter table. */
279 RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
280 PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
283 /* Interrupt register bits, using my own meaningful names. */
285 TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
286 PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
287 RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
288 TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
289 RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
292 StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
293 StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
296 static u16 media2miictl[16] = {
297 0, 0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
298 0, 0, 0, 0, 0, 0, 0, 0 };
300 /* The EPIC100 Rx and Tx buffer descriptors. */
302 struct epic_tx_desc {
309 struct epic_rx_desc {
316 enum desc_status_bits {
320 #define PRIV_ALIGN 15 /* Required alignment mask */
321 struct epic_private {
322 struct epic_rx_desc *rx_ring;
323 struct epic_tx_desc *tx_ring;
324 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
325 struct sk_buff* tx_skbuff[TX_RING_SIZE];
326 /* The addresses of receive-in-place skbuffs. */
327 struct sk_buff* rx_skbuff[RX_RING_SIZE];
329 dma_addr_t tx_ring_dma;
330 dma_addr_t rx_ring_dma;
333 spinlock_t lock; /* Group with Tx control cache line. */
334 unsigned int cur_tx, dirty_tx;
336 unsigned int cur_rx, dirty_rx;
337 unsigned int rx_buf_sz; /* Based on MTU+slack. */
339 struct pci_dev *pci_dev; /* PCI bus location. */
340 int chip_id, chip_flags;
342 struct net_device_stats stats;
343 struct timer_list timer; /* Media selection timer. */
345 unsigned char mc_filter[8];
346 signed char phys[4]; /* MII device addresses. */
347 u16 advertising; /* NWay media advertisement */
349 struct mii_if_info mii;
350 unsigned int tx_full:1; /* The Tx queue is full. */
351 unsigned int default_port:4; /* Last dev->if_port value. */
354 static int epic_open(struct net_device *dev);
355 static int read_eeprom(long ioaddr, int location);
356 static int mdio_read(struct net_device *dev, int phy_id, int location);
357 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
358 static void epic_restart(struct net_device *dev);
359 static void epic_timer(unsigned long data);
360 static void epic_tx_timeout(struct net_device *dev);
361 static void epic_init_ring(struct net_device *dev);
362 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
363 static int epic_rx(struct net_device *dev);
364 static void epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
365 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
366 static int epic_close(struct net_device *dev);
367 static struct net_device_stats *epic_get_stats(struct net_device *dev);
368 static void set_rx_mode(struct net_device *dev);
372 static int __devinit epic_init_one (struct pci_dev *pdev,
373 const struct pci_device_id *ent)
375 static int card_idx = -1;
377 int chip_idx = (int) ent->driver_data;
379 struct net_device *dev;
380 struct epic_private *ep;
381 int i, option = 0, duplex = 0;
385 /* when built into the kernel, we only print version if device is found */
387 static int printed_version;
388 if (!printed_version++)
389 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
390 version, version2, version3);
395 i = pci_enable_device(pdev);
400 if (pci_resource_len(pdev, 0) < pci_id_tbl[chip_idx].io_size) {
401 printk (KERN_ERR "card %d: no PCI region space\n", card_idx);
405 pci_set_master(pdev);
407 dev = alloc_etherdev(sizeof (*ep));
409 printk (KERN_ERR "card %d: no memory for eth device\n", card_idx);
412 SET_MODULE_OWNER(dev);
414 if (pci_request_regions(pdev, DRV_NAME))
415 goto err_out_free_netdev;
418 ioaddr = pci_resource_start (pdev, 0);
420 ioaddr = pci_resource_start (pdev, 1);
421 ioaddr = (long) ioremap (ioaddr, pci_resource_len (pdev, 1));
423 printk (KERN_ERR DRV_NAME " %d: ioremap failed\n", card_idx);
424 goto err_out_free_res;
428 pci_set_drvdata(pdev, dev);
431 ep->mii.mdio_read = mdio_read;
432 ep->mii.mdio_write = mdio_write;
433 ep->mii.phy_id_mask = 0x1f;
434 ep->mii.reg_num_mask = 0x1f;
436 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
438 goto err_out_iounmap;
439 ep->tx_ring = (struct epic_tx_desc *)ring_space;
440 ep->tx_ring_dma = ring_dma;
442 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
444 goto err_out_unmap_tx;
445 ep->rx_ring = (struct epic_rx_desc *)ring_space;
446 ep->rx_ring_dma = ring_dma;
448 if (dev->mem_start) {
449 option = dev->mem_start;
450 duplex = (dev->mem_start & 16) ? 1 : 0;
451 } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
452 if (options[card_idx] >= 0)
453 option = options[card_idx];
454 if (full_duplex[card_idx] >= 0)
455 duplex = full_duplex[card_idx];
458 dev->base_addr = ioaddr;
461 spin_lock_init (&ep->lock);
463 /* Bring the chip out of low-power mode. */
464 outl(0x4200, ioaddr + GENCTL);
465 /* Magic?! If we don't set this bit the MII interface won't work. */
466 /* This magic is documented in SMSC app note 7.15 */
467 for (i = 16; i > 0; i--)
468 outl(0x0008, ioaddr + TEST1);
470 /* Turn on the MII transceiver. */
471 outl(0x12, ioaddr + MIICfg);
473 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
474 outl(0x0200, ioaddr + GENCTL);
476 /* Note: the '175 does not have a serial EEPROM. */
477 for (i = 0; i < 3; i++)
478 ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
481 printk(KERN_DEBUG DRV_NAME "(%s): EEPROM contents\n",
483 for (i = 0; i < 64; i++)
484 printk(" %4.4x%s", read_eeprom(ioaddr, i),
485 i % 16 == 15 ? "\n" : "");
489 ep->chip_id = chip_idx;
490 ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
492 /* Find the connected MII xcvrs.
493 Doing this in open() would allow detecting external xcvrs later, but
494 takes much time and no cards have external MII. */
496 int phy, phy_idx = 0;
497 for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
498 int mii_status = mdio_read(dev, phy, MII_BMSR);
499 if (mii_status != 0xffff && mii_status != 0x0000) {
500 ep->phys[phy_idx++] = phy;
501 printk(KERN_INFO DRV_NAME "(%s): MII transceiver #%d control "
502 "%4.4x status %4.4x.\n",
503 pdev->slot_name, phy, mdio_read(dev, phy, 0), mii_status);
506 ep->mii_phy_cnt = phy_idx;
509 ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
510 printk(KERN_INFO DRV_NAME "(%s): Autonegotiation advertising %4.4x link "
512 pdev->slot_name, ep->mii.advertising, mdio_read(dev, phy, 5));
513 } else if ( ! (ep->chip_flags & NO_MII)) {
514 printk(KERN_WARNING DRV_NAME "(%s): ***WARNING***: No MII transceiver found!\n",
516 /* Use the known PHY address of the EPII. */
519 ep->mii.phy_id = ep->phys[0];
522 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
523 if (ep->chip_flags & MII_PWRDWN)
524 outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
525 outl(0x0008, ioaddr + GENCTL);
527 /* The lower four bits are the media type. */
529 ep->mii.force_media = ep->mii.full_duplex = 1;
530 printk(KERN_INFO DRV_NAME "(%s): Forced full duplex operation requested.\n",
533 dev->if_port = ep->default_port = option;
535 /* The Epic-specific entries in the device structure. */
536 dev->open = &epic_open;
537 dev->hard_start_xmit = &epic_start_xmit;
538 dev->stop = &epic_close;
539 dev->get_stats = &epic_get_stats;
540 dev->set_multicast_list = &set_rx_mode;
541 dev->do_ioctl = &netdev_ioctl;
542 dev->watchdog_timeo = TX_TIMEOUT;
543 dev->tx_timeout = &epic_tx_timeout;
545 i = register_netdev(dev);
547 goto err_out_unmap_tx;
549 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
550 dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
551 for (i = 0; i < 5; i++)
552 printk("%2.2x:", dev->dev_addr[i]);
553 printk("%2.2x.\n", dev->dev_addr[i]);
558 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
564 pci_release_regions(pdev);
570 /* Serial EEPROM section. */
572 /* EEPROM_Ctrl bits. */
573 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
574 #define EE_CS 0x02 /* EEPROM chip select. */
575 #define EE_DATA_WRITE 0x08 /* EEPROM chip data in. */
576 #define EE_WRITE_0 0x01
577 #define EE_WRITE_1 0x09
578 #define EE_DATA_READ 0x10 /* EEPROM chip data out. */
579 #define EE_ENB (0x0001 | EE_CS)
581 /* Delay between EEPROM clock transitions.
582 This serves to flush the operation to the PCI bus.
585 #define eeprom_delay() inl(ee_addr)
587 /* The EEPROM commands include the always-set leading bit. */
588 #define EE_WRITE_CMD (5 << 6)
589 #define EE_READ64_CMD (6 << 6)
590 #define EE_READ256_CMD (6 << 8)
591 #define EE_ERASE_CMD (7 << 6)
593 static int __devinit read_eeprom(long ioaddr, int location)
597 long ee_addr = ioaddr + EECTL;
598 int read_cmd = location |
599 (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
601 outl(EE_ENB & ~EE_CS, ee_addr);
602 outl(EE_ENB, ee_addr);
604 /* Shift the read command bits out. */
605 for (i = 12; i >= 0; i--) {
606 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
607 outl(EE_ENB | dataval, ee_addr);
609 outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
612 outl(EE_ENB, ee_addr);
614 for (i = 16; i > 0; i--) {
615 outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
617 retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
618 outl(EE_ENB, ee_addr);
622 /* Terminate the EEPROM access. */
623 outl(EE_ENB & ~EE_CS, ee_addr);
628 #define MII_WRITEOP 2
629 static int mdio_read(struct net_device *dev, int phy_id, int location)
631 long ioaddr = dev->base_addr;
632 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
635 outl(read_cmd, ioaddr + MIICtrl);
636 /* Typical operation takes 25 loops. */
637 for (i = 400; i > 0; i--) {
639 if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
640 /* Work around read failure bug. */
641 if (phy_id == 1 && location < 6
642 && inw(ioaddr + MIIData) == 0xffff) {
643 outl(read_cmd, ioaddr + MIICtrl);
646 return inw(ioaddr + MIIData);
652 static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
654 long ioaddr = dev->base_addr;
657 outw(value, ioaddr + MIIData);
658 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
659 for (i = 10000; i > 0; i--) {
661 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
668 static int epic_open(struct net_device *dev)
670 struct epic_private *ep = dev->priv;
671 long ioaddr = dev->base_addr;
675 /* Soft reset the chip. */
676 outl(0x4001, ioaddr + GENCTL);
678 if ((retval = request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, dev->name, dev)))
683 outl(0x4000, ioaddr + GENCTL);
684 /* This magic is documented in SMSC app note 7.15 */
685 for (i = 16; i > 0; i--)
686 outl(0x0008, ioaddr + TEST1);
688 /* Pull the chip out of low-power mode, enable interrupts, and set for
689 PCI read multiple. The MIIcfg setting and strange write order are
690 required by the details of which bits are reset and the transceiver
691 wiring on the Ositech CardBus card.
694 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
696 if (ep->chip_flags & MII_PWRDWN)
697 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
699 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
700 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
701 inl(ioaddr + GENCTL);
702 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
704 outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
705 inl(ioaddr + GENCTL);
706 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
709 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
711 for (i = 0; i < 3; i++)
712 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
714 ep->tx_threshold = TX_FIFO_THRESH;
715 outl(ep->tx_threshold, ioaddr + TxThresh);
717 if (media2miictl[dev->if_port & 15]) {
719 mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
720 if (dev->if_port == 1) {
722 printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
724 dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
727 int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
728 if (mii_lpa != 0xffff) {
729 if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
730 ep->mii.full_duplex = 1;
731 else if (! (mii_lpa & LPA_LPACK))
732 mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
734 printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
735 " register read of %4.4x.\n", dev->name,
736 ep->mii.full_duplex ? "full" : "half",
737 ep->phys[0], mii_lpa);
741 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
742 outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
743 outl(ep->tx_ring_dma, ioaddr + PTxCDAR);
745 /* Start the chip's Rx process. */
747 outl(StartRx | RxQueued, ioaddr + COMMAND);
749 netif_start_queue(dev);
751 /* Enable interrupts by setting the interrupt mask. */
752 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
753 | CntFull | TxUnderrun | TxDone | TxEmpty
754 | RxError | RxOverflow | RxFull | RxHeader | RxDone,
758 printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
760 dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
761 ep->mii.full_duplex ? "full" : "half");
763 /* Set the timer to switch to check for link beat and perhaps switch
764 to an alternate media type. */
765 init_timer(&ep->timer);
766 ep->timer.expires = jiffies + 3*HZ;
767 ep->timer.data = (unsigned long)dev;
768 ep->timer.function = &epic_timer; /* timer handler */
769 add_timer(&ep->timer);
774 /* Reset the chip to recover from a PCI transaction error.
775 This may occur at interrupt time. */
776 static void epic_pause(struct net_device *dev)
778 long ioaddr = dev->base_addr;
779 struct epic_private *ep = dev->priv;
781 netif_stop_queue (dev);
783 /* Disable interrupts by clearing the interrupt mask. */
784 outl(0x00000000, ioaddr + INTMASK);
785 /* Stop the chip's Tx and Rx DMA processes. */
786 outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
788 /* Update the error counts. */
789 if (inw(ioaddr + COMMAND) != 0xffff) {
790 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
791 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
792 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
795 /* Remove the packets on the Rx queue. */
799 static void epic_restart(struct net_device *dev)
801 long ioaddr = dev->base_addr;
802 struct epic_private *ep = dev->priv;
805 /* Soft reset the chip. */
806 outl(0x4001, ioaddr + GENCTL);
808 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
809 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
812 /* This magic is documented in SMSC app note 7.15 */
813 for (i = 16; i > 0; i--)
814 outl(0x0008, ioaddr + TEST1);
816 #if defined(__powerpc__) || defined(__sparc__) /* Big endian */
817 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
819 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
821 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
822 if (ep->chip_flags & MII_PWRDWN)
823 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
825 for (i = 0; i < 3; i++)
826 outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
828 ep->tx_threshold = TX_FIFO_THRESH;
829 outl(ep->tx_threshold, ioaddr + TxThresh);
830 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
831 outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)*
832 sizeof(struct epic_rx_desc), ioaddr + PRxCDAR);
833 outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)*
834 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR);
836 /* Start the chip's Rx process. */
838 outl(StartRx | RxQueued, ioaddr + COMMAND);
840 /* Enable interrupts by setting the interrupt mask. */
841 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
842 | CntFull | TxUnderrun | TxDone | TxEmpty
843 | RxError | RxOverflow | RxFull | RxHeader | RxDone,
845 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
846 " interrupt %4.4x.\n",
847 dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
848 (int)inl(ioaddr + INTSTAT));
852 static void check_media(struct net_device *dev)
854 struct epic_private *ep = dev->priv;
855 long ioaddr = dev->base_addr;
856 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
857 int negotiated = mii_lpa & ep->mii.advertising;
858 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
860 if (ep->mii.force_media)
862 if (mii_lpa == 0xffff) /* Bogus read */
864 if (ep->mii.full_duplex != duplex) {
865 ep->mii.full_duplex = duplex;
866 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
867 " partner capability of %4.4x.\n", dev->name,
868 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
869 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
873 static void epic_timer(unsigned long data)
875 struct net_device *dev = (struct net_device *)data;
876 struct epic_private *ep = dev->priv;
877 long ioaddr = dev->base_addr;
878 int next_tick = 5*HZ;
881 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
882 dev->name, (int)inl(ioaddr + TxSTAT));
883 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
884 "IntStatus %4.4x RxStatus %4.4x.\n",
885 dev->name, (int)inl(ioaddr + INTMASK),
886 (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
891 ep->timer.expires = jiffies + next_tick;
892 add_timer(&ep->timer);
895 static void epic_tx_timeout(struct net_device *dev)
897 struct epic_private *ep = dev->priv;
898 long ioaddr = dev->base_addr;
901 printk(KERN_WARNING "%s: Transmit timeout using MII device, "
902 "Tx status %4.4x.\n",
903 dev->name, (int)inw(ioaddr + TxSTAT));
905 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
906 dev->name, ep->dirty_tx, ep->cur_tx);
909 if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */
910 ep->stats.tx_fifo_errors++;
911 outl(RestartTx, ioaddr + COMMAND);
914 outl(TxQueued, dev->base_addr + COMMAND);
917 dev->trans_start = jiffies;
918 ep->stats.tx_errors++;
920 netif_wake_queue(dev);
923 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
924 static void epic_init_ring(struct net_device *dev)
926 struct epic_private *ep = dev->priv;
930 ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
931 ep->dirty_tx = ep->cur_tx = 0;
932 ep->cur_rx = ep->dirty_rx = 0;
933 ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
935 /* Initialize all Rx descriptors. */
936 for (i = 0; i < RX_RING_SIZE; i++) {
937 ep->rx_ring[i].rxstatus = 0;
938 ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
939 ep->rx_ring[i].next = ep->rx_ring_dma +
940 (i+1)*sizeof(struct epic_rx_desc);
941 ep->rx_skbuff[i] = 0;
943 /* Mark the last entry as wrapping the ring. */
944 ep->rx_ring[i-1].next = ep->rx_ring_dma;
946 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
947 for (i = 0; i < RX_RING_SIZE; i++) {
948 struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
949 ep->rx_skbuff[i] = skb;
952 skb->dev = dev; /* Mark as being used by this device. */
953 skb_reserve(skb, 2); /* 16 byte align the IP header. */
954 ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
955 skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
956 ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
958 ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
960 /* The Tx buffer descriptor is filled in as needed, but we
961 do need to clear the ownership bit. */
962 for (i = 0; i < TX_RING_SIZE; i++) {
963 ep->tx_skbuff[i] = 0;
964 ep->tx_ring[i].txstatus = 0x0000;
965 ep->tx_ring[i].next = ep->tx_ring_dma +
966 (i+1)*sizeof(struct epic_tx_desc);
968 ep->tx_ring[i-1].next = ep->tx_ring_dma;
972 static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
974 struct epic_private *ep = dev->priv;
975 int entry, free_count;
979 /* Caution: the write order is important here, set the field with the
980 "ownership" bit last. */
982 /* Calculate the next Tx descriptor entry. */
983 spin_lock_irqsave(&ep->lock, flags);
984 free_count = ep->cur_tx - ep->dirty_tx;
985 entry = ep->cur_tx % TX_RING_SIZE;
987 ep->tx_skbuff[entry] = skb;
988 ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
989 skb->len, PCI_DMA_TODEVICE);
990 if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
991 ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
992 } else if (free_count == TX_QUEUE_LEN/2) {
993 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
994 } else if (free_count < TX_QUEUE_LEN - 1) {
995 ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
997 /* Leave room for an additional entry. */
998 ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
1001 ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
1002 ep->tx_ring[entry].txstatus =
1003 ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
1004 | cpu_to_le32(DescOwn);
1008 netif_stop_queue(dev);
1010 spin_unlock_irqrestore(&ep->lock, flags);
1011 /* Trigger an immediate transmit demand. */
1012 outl(TxQueued, dev->base_addr + COMMAND);
1014 dev->trans_start = jiffies;
1016 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
1017 "flag %2.2x Tx status %8.8x.\n",
1018 dev->name, (int)skb->len, entry, ctrl_word,
1019 (int)inl(dev->base_addr + TxSTAT));
1024 /* The interrupt handler does all of the Rx thread work and cleans up
1025 after the Tx thread. */
1026 static void epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1028 struct net_device *dev = dev_instance;
1029 struct epic_private *ep = dev->priv;
1030 long ioaddr = dev->base_addr;
1031 int status, boguscnt = max_interrupt_work;
1034 status = inl(ioaddr + INTSTAT);
1035 /* Acknowledge all of the current interrupt sources ASAP. */
1036 outl(status & 0x00007fff, ioaddr + INTSTAT);
1039 printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
1040 "intstat=%#8.8x.\n",
1041 dev->name, status, (int)inl(ioaddr + INTSTAT));
1043 if ((status & IntrSummary) == 0)
1046 if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow))
1049 if (status & (TxEmpty | TxDone)) {
1050 unsigned int dirty_tx, cur_tx;
1052 /* Note: if this lock becomes a problem we can narrow the locked
1053 region at the cost of occasionally grabbing the lock more
1055 spin_lock(&ep->lock);
1056 cur_tx = ep->cur_tx;
1057 dirty_tx = ep->dirty_tx;
1058 for (; cur_tx - dirty_tx > 0; dirty_tx++) {
1059 struct sk_buff *skb;
1060 int entry = dirty_tx % TX_RING_SIZE;
1061 int txstatus = le32_to_cpu(ep->tx_ring[entry].txstatus);
1063 if (txstatus & DescOwn)
1064 break; /* It still hasn't been Txed */
1066 if ( ! (txstatus & 0x0001)) {
1067 /* There was an major error, log it. */
1068 #ifndef final_version
1070 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1071 dev->name, txstatus);
1073 ep->stats.tx_errors++;
1074 if (txstatus & 0x1050) ep->stats.tx_aborted_errors++;
1075 if (txstatus & 0x0008) ep->stats.tx_carrier_errors++;
1076 if (txstatus & 0x0040) ep->stats.tx_window_errors++;
1077 if (txstatus & 0x0010) ep->stats.tx_fifo_errors++;
1079 ep->stats.collisions += (txstatus >> 8) & 15;
1080 ep->stats.tx_packets++;
1081 ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
1084 /* Free the original skb. */
1085 skb = ep->tx_skbuff[entry];
1086 pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
1087 skb->len, PCI_DMA_TODEVICE);
1088 dev_kfree_skb_irq(skb);
1089 ep->tx_skbuff[entry] = 0;
1092 #ifndef final_version
1093 if (cur_tx - dirty_tx > TX_RING_SIZE) {
1094 printk(KERN_WARNING "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1095 dev->name, dirty_tx, cur_tx, ep->tx_full);
1096 dirty_tx += TX_RING_SIZE;
1099 ep->dirty_tx = dirty_tx;
1101 && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
1102 /* The ring is no longer full, allow new TX entries. */
1104 spin_unlock(&ep->lock);
1105 netif_wake_queue(dev);
1107 spin_unlock(&ep->lock);
1110 /* Check uncommon events all at once. */
1111 if (status & (CntFull | TxUnderrun | RxOverflow | RxFull |
1112 PCIBusErr170 | PCIBusErr175)) {
1113 if (status == 0xffffffff) /* Chip failed or removed (CardBus). */
1115 /* Always update the error counts to avoid overhead later. */
1116 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1117 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1118 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1120 if (status & TxUnderrun) { /* Tx FIFO underflow. */
1121 ep->stats.tx_fifo_errors++;
1122 outl(ep->tx_threshold += 128, ioaddr + TxThresh);
1123 /* Restart the transmit process. */
1124 outl(RestartTx, ioaddr + COMMAND);
1126 if (status & RxOverflow) { /* Missed a Rx frame. */
1127 ep->stats.rx_errors++;
1129 if (status & (RxOverflow | RxFull))
1130 outw(RxQueued, ioaddr + COMMAND);
1131 if (status & PCIBusErr170) {
1132 printk(KERN_ERR "%s: PCI Bus Error! EPIC status %4.4x.\n",
1137 /* Clear all error sources. */
1138 outl(status & 0x7f18, ioaddr + INTSTAT);
1140 if (--boguscnt < 0) {
1141 printk(KERN_ERR "%s: Too much work at interrupt, "
1142 "IntrStatus=0x%8.8x.\n",
1144 /* Clear all interrupt sources. */
1145 outl(0x0001ffff, ioaddr + INTSTAT);
1151 printk(KERN_DEBUG "%s: exiting interrupt, intr_status=%#4.4x.\n",
/*
 * epic_rx - receive-path processing for the EPIC/100.
 *
 * Walks the Rx descriptor ring starting at ep->cur_rx, passing each
 * completed frame up the stack, then refills the ring with fresh skbuffs
 * and hands the descriptors back to the chip by setting DescOwn.
 *
 * NOTE(review): several source lines are elided in this extract (loop
 * braces, early-exit breaks, the final return), so the notes below cover
 * only what the visible lines establish.
 */
1157 static int epic_rx(struct net_device *dev)
1159 struct epic_private *ep = dev->priv;
1160 int entry = ep->cur_rx % RX_RING_SIZE;
/* Budget: never process more entries than are currently outstanding. */
1161 int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
1165 printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
1166 ep->rx_ring[entry].rxstatus);
1167 /* If we own the next entry, it's a new packet. Send it up. */
1168 while ((ep->rx_ring[entry].rxstatus & cpu_to_le32(DescOwn)) == 0) {
1169 int status = le32_to_cpu(ep->rx_ring[entry].rxstatus);
1172 printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
1173 if (--rx_work_limit < 0)
/* 0x2006: error bits in the Rx status word (0x2000 = frame spanned
   multiple buffers; 0x0006 = frame errors counted by hardware). */
1175 if (status & 0x2006) {
1177 printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
1179 if (status & 0x2000) {
1180 printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
1181 "multiple buffers, status %4.4x!\n", dev->name, status);
1182 ep->stats.rx_length_errors++;
1183 } else if (status & 0x0006)
1184 /* Rx Frame errors are counted in hardware. */
1185 ep->stats.rx_errors++;
1187 /* Malloc up new buffer, compatible with net-2e. */
1188 /* Omit the four octet CRC from the length. */
1189 short pkt_len = (status >> 16) - 4;
1190 struct sk_buff *skb;
/* Make the DMA'd buffer visible to the CPU before touching it. */
1192 pci_dma_sync_single(ep->pci_dev, ep->rx_ring[entry].bufaddr,
1193 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1194 if (pkt_len > PKT_BUF_SZ - 4) {
1195 printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
1197 dev->name, status, pkt_len);
1200 /* Check if the packet is long enough to accept without copying
1201 to a minimally-sized skbuff. */
1202 if (pkt_len < rx_copybreak
1203 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1205 skb_reserve(skb, 2); /* 16 byte align the IP header */
1206 #if 1 /* HAS_IP_COPYSUM */
1207 eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
1208 skb_put(skb, pkt_len);
1210 memcpy(skb_put(skb, pkt_len), ep->rx_skbuff[entry]->tail,
/* Large packet: hand the ring skbuff itself to the stack and
   unmap it; it is replaced during the refill pass below. */
1214 pci_unmap_single(ep->pci_dev,
1215 ep->rx_ring[entry].bufaddr,
1216 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1217 skb_put(skb = ep->rx_skbuff[entry], pkt_len);
1218 ep->rx_skbuff[entry] = NULL;
1220 skb->protocol = eth_type_trans(skb, dev);
1222 dev->last_rx = jiffies;
1223 ep->stats.rx_packets++;
1224 ep->stats.rx_bytes += pkt_len;
1227 entry = (++ep->cur_rx) % RX_RING_SIZE;
1230 /* Refill the Rx ring buffers. */
1231 for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
1232 entry = ep->dirty_rx % RX_RING_SIZE;
1233 if (ep->rx_skbuff[entry] == NULL) {
1234 struct sk_buff *skb;
1235 skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
1238 skb->dev = dev; /* Mark as being used by this device. */
1239 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1240 ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
1241 skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
/* Give the descriptor back to the chip last, after bufaddr is set. */
1244 ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
/*
 * epic_close - stop the interface and release its runtime resources.
 *
 * Stops the Tx queue, kills the media timer, frees the IRQ, unmaps and
 * frees every skbuff still held in the Rx and Tx rings, and finally puts
 * the chip into low-power mode via GENCTL.
 *
 * NOTE(review): some lines are elided in this extract (e.g. the
 * declaration of `i`, interrupt-disable writes, the return statement).
 */
1249 static int epic_close(struct net_device *dev)
1251 long ioaddr = dev->base_addr;
1252 struct epic_private *ep = dev->priv;
1253 struct sk_buff *skb;
1256 netif_stop_queue(dev);
1259 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1260 dev->name, (int)inl(ioaddr + INTSTAT));
/* del_timer_sync() also waits for a running timer handler to finish. */
1262 del_timer_sync(&ep->timer);
1264 free_irq(dev->irq, dev);
1266 /* Free all the skbuffs in the Rx queue. */
1267 for (i = 0; i < RX_RING_SIZE; i++) {
1268 skb = ep->rx_skbuff[i];
1269 ep->rx_skbuff[i] = 0;
1270 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
1271 ep->rx_ring[i].buflength = 0;
1273 pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
1274 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1277 ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
/* Tx ring: unmap and free any packets still queued at close time. */
1279 for (i = 0; i < TX_RING_SIZE; i++) {
1280 skb = ep->tx_skbuff[i];
1281 ep->tx_skbuff[i] = 0;
1284 pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
1285 skb->len, PCI_DMA_TODEVICE);
1289 /* Green! Leave the chip in low-power mode. */
1290 outl(0x0008, ioaddr + GENCTL);
/*
 * epic_get_stats - return the interface statistics.
 *
 * When the interface is up, first folds the chip's hardware error
 * counters (missed / alignment / CRC, read from MPCNT, ALICNT, CRCCNT)
 * into the software counters; the registers are clear-on-read style
 * accumulators, hence the "+=".
 *
 * NOTE(review): the trailing `return &ep->stats;` is elided in this
 * extract.
 */
1297 struct epic_private *ep = dev->priv;
1298 long ioaddr = dev->base_addr;
1300 if (netif_running(dev)) {
1301 /* Update the error counts. */
1302 ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
1303 ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
1304 ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
1310 /* Set or clear the multicast filter for this adaptor.
1311 Note that we only use exclusion around actually queueing the
1312 new frame, not around filling ep->setup_frame. This is non-deterministic
1313 when re-entered but still correct. */
/*
 * set_rx_mode - program the receive filter (RxCtrl + MC hash registers).
 *
 * RxCtrl values written below: 0x002C = promiscuous, 0x000C = accept all
 * multicast, 0x0004 = no multicast. The perfect multicast hash path is
 * intentionally dead ("Never executed") because of a reported chip bug;
 * any multicast membership falls into the accept-all branch instead.
 */
1315 static void set_rx_mode(struct net_device *dev)
1317 long ioaddr = dev->base_addr;
1318 struct epic_private *ep = dev->priv;
1319 unsigned char mc_filter[8]; /* Multicast hash filter */
1322 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1323 outl(0x002C, ioaddr + RxCtrl);
1324 /* Unconditionally log net taps. */
1325 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1326 memset(mc_filter, 0xff, sizeof(mc_filter));
1327 } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
1328 /* There is apparently a chip bug, so the multicast filter
1329 is never enabled. */
1330 /* Too many to filter perfectly -- accept all multicasts. */
1331 memset(mc_filter, 0xff, sizeof(mc_filter));
1332 outl(0x000C, ioaddr + RxCtrl);
1333 } else if (dev->mc_count == 0) {
1334 outl(0x0004, ioaddr + RxCtrl);
1336 } else { /* Never executed, for now. */
1337 struct dev_mc_list *mclist;
1339 memset(mc_filter, 0, sizeof(mc_filter));
1340 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1341 i++, mclist = mclist->next)
/* Hash on the low 6 bits of the little-endian CRC of the address. */
1342 set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
1345 /* ToDo: perhaps we need to stop the Tx and Rx process here? */
/* Only rewrite the MC0..MC3 registers when the filter actually changed. */
1346 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1347 for (i = 0; i < 4; i++)
1348 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
1349 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
/*
 * netdev_ethtool_ioctl - handle SIOCETHTOOL sub-commands.
 *
 * Copies the ethtool command word from user space, dispatches on it, and
 * copies results back. Supported: GDRVINFO, GSET/SSET (MII settings,
 * under np->lock), NWAY_RST, GLINK, and G/SMSGLVL.
 *
 * Fix: "ðcmd" restored to "&ethcmd" - the '&eth' prefix of the
 * identifier had been mangled into the HTML entity for the character 'ð'.
 *
 * NOTE(review): several lines are elided in this extract (the `u32
 * ethcmd;` declaration, -EFAULT returns, closing braces, the default
 * -EOPNOTSUPP case).
 */
1354 static int netdev_ethtool_ioctl (struct net_device *dev, void *useraddr)
1356 struct epic_private *np = dev->priv;
1359 if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
1363 case ETHTOOL_GDRVINFO: {
/* Report driver identity and the PCI slot it is bound to. */
1364 struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
1365 strcpy (info.driver, DRV_NAME);
1366 strcpy (info.version, DRV_VERSION);
1367 strcpy (info.bus_info, np->pci_dev->slot_name);
1368 if (copy_to_user (useraddr, &info, sizeof (info)))
/* Read current MII link settings under the driver lock. */
1374 case ETHTOOL_GSET: {
1375 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1376 spin_lock_irq(&np->lock);
1377 mii_ethtool_gset(&np->mii, &ecmd);
1378 spin_unlock_irq(&np->lock);
1379 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
/* Apply user-supplied MII link settings under the driver lock. */
1384 case ETHTOOL_SSET: {
1386 struct ethtool_cmd ecmd;
1387 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
1389 spin_lock_irq(&np->lock);
1390 r = mii_ethtool_sset(&np->mii, &ecmd);
1391 spin_unlock_irq(&np->lock);
1394 /* restart autonegotiation */
1395 case ETHTOOL_NWAY_RST: {
1396 return mii_nway_restart(&np->mii);
1398 /* get link status */
1399 case ETHTOOL_GLINK: {
1400 struct ethtool_value edata = {ETHTOOL_GLINK};
1401 edata.data = mii_link_ok(&np->mii);
1402 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1407 /* get message-level */
1408 case ETHTOOL_GMSGLVL: {
1409 struct ethtool_value edata = {ETHTOOL_GMSGLVL};
1411 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1415 /* set message-level */
1416 case ETHTOOL_SMSGLVL: {
1417 struct ethtool_value edata;
1418 if (copy_from_user(&edata, useraddr, sizeof(edata)))
/*
 * netdev_ioctl - top-level ioctl dispatcher for this interface.
 *
 * If the interface is down, temporarily powers the chip up (GENCTL /
 * NVCTL writes) so MII registers can be accessed, runs the command, then
 * powers it back down. SIOCETHTOOL goes to netdev_ethtool_ioctl(); the
 * remaining (SIOC[GS]MII*) commands go through generic_mii_ioctl() under
 * np->lock.
 *
 * NOTE(review): some lines are elided here (the `int rc;` path between
 * the ethtool and MII branches, and the final return).
 */
1432 struct epic_private *np = dev->priv;
1433 long ioaddr = dev->base_addr;
1434 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
1437 /* power-up, if interface is down */
1438 if (! netif_running(dev)) {
1439 outl(0x0200, ioaddr + GENCTL);
1440 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
1443 /* ethtool commands */
1444 if (cmd == SIOCETHTOOL)
1445 rc = netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
1447 /* all other ioctls (the SIOC[GS]MIIxxx ioctls) */
1449 spin_lock_irq(&np->lock);
1450 rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
1451 spin_unlock_irq(&np->lock);
1454 /* power-down, if interface is down */
1455 if (! netif_running(dev)) {
1456 outl(0x0008, ioaddr + GENCTL);
1457 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
/*
 * epic_remove_one - PCI hot-unplug / driver-unload teardown.
 *
 * Frees the DMA-consistent Tx/Rx descriptor rings, unregisters the net
 * device, unmaps the MMIO region, releases the PCI regions, and clears
 * the driver data pointer.
 *
 * NOTE(review): the rings are freed *before* unregister_netdev(); later
 * versions of this driver unregister first - verify the ordering is safe
 * for this kernel before relying on it. The final kfree/free of `dev`
 * appears to be on a line elided from this extract.
 */
1465 struct net_device *dev = pci_get_drvdata(pdev);
1466 struct epic_private *ep = dev->priv;
1468 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1469 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1470 unregister_netdev(dev);
1472 iounmap((void*) dev->base_addr);
1474 pci_release_regions(pdev);
1476 pci_set_drvdata(pdev, NULL);
1477 /* pci_power_off(pdev, -1); */
/*
 * epic_suspend - PM suspend hook: put the chip into low-power mode.
 *
 * A no-op when the interface is down. Does not save register state; the
 * resume hook re-initializes the device instead.
 *
 * NOTE(review): the early-return and final `return 0;` lines are elided
 * in this extract.
 */
1485 struct net_device *dev = pci_get_drvdata(pdev);
1486 long ioaddr = dev->base_addr;
1488 if (!netif_running(dev))
1491 /* Put the chip into low-power mode. */
1492 outl(0x0008, ioaddr + GENCTL);
1493 /* pci_power_off(pdev, -1); */
/*
 * epic_resume - PM resume hook.
 *
 * A no-op when the interface is down; otherwise re-initializes the
 * device (the restart call sits on a line elided from this extract).
 */
1500 struct net_device *dev = pci_get_drvdata(pdev);
1502 if (!netif_running(dev))
1505 /* pci_power_on(pdev); */
1509 #endif /* CONFIG_PM */
/*
 * PCI driver registration table, using GNU `label:` initializer syntax
 * (pre-C99 kernel style). The suspend/resume entries are compiled in
 * only under CONFIG_PM.
 * NOTE(review): the `name:` initializer appears to be on a line elided
 * from this extract.
 */
1512 static struct pci_driver epic_driver = {
1514 id_table: epic_pci_tbl,
1515 probe: epic_init_one,
1516 remove: __devexit_p(epic_remove_one),
1518 suspend: epic_suspend,
1519 resume: epic_resume,
1520 #endif /* CONFIG_PM */
/*
 * epic_init - module entry point.
 *
 * Prints the driver version banner (three strings, each with its own
 * KERN_INFO level prefix) and registers the PCI driver; the return value
 * of pci_module_init() is the module load status.
 */
1524 static int __init epic_init (void)
1526 /* when a module, this is printed whether or not devices are found in probe */
1528 printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
1529 version, version2, version3);
1532 return pci_module_init (&epic_driver);
/*
 * epic_cleanup - module exit point: unregister the PCI driver, which
 * triggers epic_remove_one() for every bound device.
 */
1536 static void __exit epic_cleanup (void)
1538 pci_unregister_driver (&epic_driver);
1542 module_init(epic_init);
1543 module_exit(epic_cleanup);