1 /* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
3 Written 1996-1999 by Donald Becker.
5 The driver also contains updates by different kernel developers
6 (see incomplete list below).
7 Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
8 Please use this email address and linux-kernel mailing list for bug reports.
10 This software may be used and distributed according to the terms
11 of the GNU General Public License, incorporated herein by reference.
13 This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
14 It should work with all i82557/558/559 boards.
17 1998 Apr - 2000 Feb Andrey V. Savochkin <saw@saw.sw.com.sg>
18 Serious fixes for multicast filter list setting, TX timeout routine;
19 RX ring refilling logic; other stuff
20 2000 Feb Jeff Garzik <jgarzik@pobox.com>
21 Convert to new PCI driver interface
22 2000 Mar 24 Dragan Stancevic <visitor@valinux.com>
23 Disabled FC and ER, to avoid lockups when we get FCP interrupts.
24 2000 Jul 17 Goutham Rao <goutham.rao@intel.com>
25 PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
26 2000 Aug 31 David Mosberger <davidm@hpl.hp.com>
27 rx_align support: enables rx DMA without causing unaligned accesses.
30 static const char *version =
31 "eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
32 "eepro100.c: $Revision: 1.1.1.1 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
34 /* A few user-configurable values that apply to all boards.
35 First set is undocumented and spelled per Intel recommendations. */
37 static int congenb /* = 0 */; /* Enable congestion control in the DP83840. */
38 static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */
39 static int rxfifo = 8; /* Rx FIFO threshold, default 32 bytes. */
40 /* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
41 static int txdmacount = 128;
42 static int rxdmacount /* = 0 */;
44 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || \
45 defined(__mips__) || defined(__arm__) || defined(__hppa__)
46 /* align rx buffers to 2 bytes so that IP header is aligned */
47 # define rx_align(skb) skb_reserve((skb), 2)
48 # define RxFD_ALIGNMENT __attribute__ ((aligned (2), packed))
50 # define rx_align(skb)
51 # define RxFD_ALIGNMENT
54 /* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
55 Lower values use more memory, but are faster. */
56 static int rx_copybreak = 200;
58 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
59 static int max_interrupt_work = 20;
61 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
62 static int multicast_filter_limit = 64;
64 /* 'options' is used to pass a transceiver override or full-duplex flag
65 e.g. "options=16" for FD, "options=32" for 100mbps-only. */
66 static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
67 static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
69 /* A few values that may be tweaked. */
70 /* The ring sizes should be a power of two for efficiency. */
71 #define TX_RING_SIZE 64
72 #define RX_RING_SIZE 64
73 /* How many slots multicast filter setup may take.
74 Do not decrease without changing the set_rx_mode() implementation. */
75 #define TX_MULTICAST_SIZE 2
76 #define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
77 /* Actual number of TX packets queued, must be
78 <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
79 #define TX_QUEUE_LIMIT (TX_RING_SIZE-TX_MULTICAST_RESERV)
80 /* Hysteresis marking queue as no longer full. */
81 #define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
83 /* Operational parameters that usually are not changed. */
85 /* Time in jiffies before concluding the transmitter is hung. */
86 #define TX_TIMEOUT (2*HZ)
87 /* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
88 #define PKT_BUF_SZ 1536
90 #if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
91 #warning You must compile this file with the correct options!
92 #warning See the last lines of the source file.
93 #error You must compile this driver with "-O".
96 #include <linux/config.h>
97 #include <linux/version.h>
98 #include <linux/module.h>
100 #include <linux/kernel.h>
101 #include <linux/string.h>
102 #include <linux/errno.h>
103 #include <linux/ioport.h>
104 #include <linux/slab.h>
105 #include <linux/interrupt.h>
106 #include <linux/timer.h>
107 #include <linux/pci.h>
108 #include <linux/spinlock.h>
109 #include <linux/init.h>
110 #include <linux/mii.h>
111 #include <linux/delay.h>
113 #include <asm/bitops.h>
115 #include <asm/uaccess.h>
117 #include <linux/netdevice.h>
118 #include <linux/etherdevice.h>
119 #include <linux/skbuff.h>
120 #include <linux/ethtool.h>
121 #include <linux/mii.h>
123 static int debug = -1;
124 #define DEBUG_DEFAULT (NETIF_MSG_DRV | \
128 #define DEBUG ((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
/* Module metadata and load-time parameter declarations (modinfo/insmod). */
131 MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
132 MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
133 MODULE_LICENSE("GPL");
134 MODULE_PARM(debug, "i");
135 MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
136 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
137 MODULE_PARM(congenb, "i");
138 MODULE_PARM(txfifo, "i");
139 MODULE_PARM(rxfifo, "i");
140 MODULE_PARM(txdmacount, "i");
141 MODULE_PARM(rxdmacount, "i");
142 MODULE_PARM(rx_copybreak, "i");
143 MODULE_PARM(max_interrupt_work, "i");
144 MODULE_PARM(multicast_filter_limit, "i");
145 MODULE_PARM_DESC(debug, "debug level (0-6)");
146 MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
147 MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
148 MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
149 MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
150 MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
/* Fixed: descriptions must use the exact parameter names declared above
 * (txdmacount/rxdmacount, not "txdmaccount"/"rxdmaccount"), otherwise
 * modinfo shows no description for those parameters. */
151 MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
152 MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
153 MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
154 MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
155 MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
157 #define RUN_AT(x) (jiffies + (x))
159 /* ACPI power states don't universally work (yet) */
161 #undef pci_set_power_state
162 #define pci_set_power_state null_set_power_state
163 static inline int null_set_power_state(struct pci_dev *dev, int state)
167 #endif /* CONFIG_PM */
169 #define netdevice_start(dev)
170 #define netdevice_stop(dev)
171 #define netif_set_tx_timeout(dev, tf, tm) \
173 (dev)->tx_timeout = (tf); \
174 (dev)->watchdog_timeo = (tm); \
182 I. Board Compatibility
184 This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
185 single-chip fast Ethernet controller for PCI, as used on the Intel
186 EtherExpress Pro 100 adapter.
188 II. Board-specific settings
190 PCI bus devices are configured by the system at boot time, so no jumpers
191 need to be set on the board. The system BIOS should be set to assign the
192 PCI INTA signal to an otherwise unused system IRQ line. While it's
193 possible to share PCI interrupt lines, it negatively impacts performance and
194 only recent kernels support it.
196 III. Driver operation
199 The Speedo3 is very similar to other Intel network chips, that is to say
200 "apparently designed on a different planet". This chip retains the complex
201 Rx and Tx descriptors and multiple buffers pointers as previous chips, but
202 also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
203 Tx mode, but in a simplified lower-overhead manner: it associates only a
204 single buffer descriptor with each frame descriptor.
206 Despite the extra space overhead in each receive skbuff, the driver must use
207 the simplified Rx buffer mode to assure that only a single data buffer is
208 associated with each RxFD. The driver implements this by reserving space
209 for the Rx descriptor at the head of each Rx skbuff.
211 The Speedo-3 has receive and command unit base addresses that are added to
212 almost all descriptor pointers. The driver sets these to zero, so that all
213 pointer fields are absolute addresses.
215 The System Control Block (SCB) of some previous Intel chips exists on the
216 chip in both PCI I/O and memory space. This driver uses the I/O space
217 registers, but might switch to memory mapped mode to better support non-x86
220 IIIB. Transmit structure
222 The driver must use the complex Tx command+descriptor mode in order to
223 have an indirect pointer to the skbuff data section. Each Tx command block
224 (TxCB) is associated with two immediately appended Tx Buffer Descriptor
225 (TxBD). A fixed ring of these TxCB+TxBD pairs are kept as part of the
226 speedo_private data structure for each adapter instance.
228 The newer i82558 explicitly supports this structure, and can read the two
229 TxBDs in the same PCI burst as the TxCB.
231 This ring structure is used for all normal transmit packets, but the
232 transmit packet descriptors aren't long enough for most non-Tx commands such
233 as CmdConfigure. This is complicated by the possibility that the chip has
234 already loaded the link address in the previous descriptor. So for these
235 commands we convert the next free descriptor on the ring to a NoOp, and point
236 that descriptor's link to the complex command.
238 An additional complexity of these non-transmit commands is that they may be
239 added asynchronous to the normal transmit queue, so we disable interrupts
240 whenever the Tx descriptor ring is manipulated.
242 A notable aspect of these special configure commands is that they do
243 work with the normal Tx ring entry scavenge method. The Tx ring scavenge
244 is done at interrupt time using the 'dirty_tx' index, and checking for the
245 command-complete bit. While the setup frames may have the NoOp command on the
246 Tx ring marked as complete, but not have completed the setup command, this
247 is not a problem. The tx_ring entry can be still safely reused, as the
248 tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
250 Commands may have bits set e.g. CmdSuspend in the command word to either
251 suspend or stop the transmit/command unit. This driver always flags the last
252 command with CmdSuspend, erases the CmdSuspend in the previous command, and
253 then issues a CU_RESUME.
254 Note: Watch out for the potential race condition here: imagine
255 erasing the previous suspend
256 the chip processes the previous command
257 the chip processes the final command, and suspends
259 the chip processes the next-yet-valid post-final-command.
260 So blindly sending a CU_RESUME is only safe if we do it immediately
261 after erasing the previous CmdSuspend, without the possibility of an
262 intervening delay. Thus the resume command is always within the
263 interrupts-disabled region. This is a timing dependence, but handling this
264 condition in a timing-independent way would considerably complicate the code.
266 Note: In previous generation Intel chips, restarting the command unit was a
267 notoriously slow process. This is presumably no longer true.
269 IIIC. Receive structure
271 Because of the bus-master support on the Speedo3 this driver uses the new
272 SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
273 This scheme allocates full-sized skbuffs as receive buffers. The value
274 SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
275 trade-off the memory wasted by passing the full-sized skbuff to the queue
276 layer for all frames vs. the copying cost of copying a frame to a
277 correctly-sized skbuff.
279 For small frames the copying cost is negligible (esp. considering that we
280 are pre-loading the cache with immediately useful header information), so we
281 allocate a new, minimally-sized skbuff. For large frames the copying cost
282 is non-trivial, and the larger copy might flush the cache of useful data, so
283 we pass up the skbuff the packet was received into.
287 Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
288 that stated that I could disclose the information. But I still resent
289 having to sign an Intel NDA when I'm helping Intel sell their own product!
293 static int speedo_found1(struct pci_dev *pdev, long ioaddr, int fnd_cnt, int acpi_idle_state);
296 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
297 PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
300 static inline unsigned int io_inw(unsigned long port)
304 static inline void io_outw(unsigned int val, unsigned long port)
310 /* Currently alpha headers define in/out macros.
311 Undefine them. 2000/03/30 SAW */
326 /* Offsets to the various registers.
327 All accesses need not be longword aligned. */
328 enum speedo_offsets {
329 SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
331 SCBPointer = 4, /* General purpose pointer. */
332 SCBPort = 8, /* Misc. commands and operands. */
333 SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
334 SCBCtrlMDI = 16, /* MDI interface control. */
335 SCBEarlyRx = 20, /* Early receive byte count. */
337 /* Commands that can be put in a command list entry. */
339 CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
340 CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
341 CmdDump = 0x60000, CmdDiagnose = 0x70000,
342 CmdSuspend = 0x40000000, /* Suspend after completion. */
343 CmdIntr = 0x20000000, /* Interrupt after completion. */
344 CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
346 /* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
347 status bits. Previous driver versions used separate 16 bit fields for
348 commands and statuses. --SAW
350 #if defined(__alpha__)
351 # define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status);
353 # if defined(__LITTLE_ENDIAN)
354 # define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
355 # elif defined(__BIG_ENDIAN)
356 # define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
358 # error Unsupported byteorder
363 SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
364 SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
365 SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
366 /* The rest are Rx and Tx commands. */
367 CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
368 CUCmdBase=0x0060, /* CU Base address (set to zero) . */
369 CUDumpStats=0x0070, /* Dump then reset stats counters. */
370 RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
371 RxResumeNoResources=0x0007,
375 PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
378 /* The Speedo3 Rx and Tx frame/buffer descriptors. */
379 struct descriptor { /* A generic descriptor. */
380 volatile s32 cmd_status; /* All command and status fields. */
381 u32 link; /* struct descriptor * */
382 unsigned char params[0];
385 /* The Speedo3 Rx and Tx buffer descriptors. */
386 struct RxFD { /* Receive frame descriptor. */
388 u32 link; /* struct RxFD * */
389 u32 rx_buf_addr; /* void * */
393 /* Selected elements of the Tx/RxFD.status word. */
395 RxComplete=0x8000, RxOK=0x2000,
396 RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
397 RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
398 TxUnderrun=0x1000, StatusComplete=0x8000,
401 #define CONFIG_DATA_SIZE 22
402 struct TxFD { /* Transmit frame descriptor set. */
404 u32 link; /* void * */
405 u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
406 s32 count; /* # of TBD (=1), Tx start thresh., etc. */
407 /* This constitutes two "TBD" entries -- we only use one. */
408 #define TX_DESCR_BUF_OFFSET 16
409 u32 tx_buf_addr0; /* void *, frame to be transmitted. */
410 s32 tx_buf_size0; /* Length of Tx frame. */
411 u32 tx_buf_addr1; /* void *, frame to be transmitted. */
412 s32 tx_buf_size1; /* Length of Tx frame. */
413 /* the structure must have space for at least CONFIG_DATA_SIZE starting
414 * from tx_desc_addr field */
417 /* Multicast filter setting block. --SAW */
418 struct speedo_mc_block {
419 struct speedo_mc_block *next;
421 dma_addr_t frame_dma;
423 struct descriptor frame __attribute__ ((__aligned__(16)));
426 /* Elements of the dump_statistics block. This block must be lword aligned. */
427 struct speedo_stats {
440 u32 rx_resource_errs;
447 enum Rx_ring_state_bits {
448 RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
451 /* Do not change the position (alignment) of the first few elements!
452 The later elements are grouped for cache locality.
454 Unfortunately, all the positions have been shifted since there.
455 A new re-alignment is required. 2000/03/06 SAW */
456 struct speedo_private {
457 struct TxFD *tx_ring; /* Commands (usually CmdTxPacket). */
458 struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
459 /* The addresses of a Tx/Rx-in-place packets/buffers. */
460 struct sk_buff *tx_skbuff[TX_RING_SIZE];
461 struct sk_buff *rx_skbuff[RX_RING_SIZE];
462 /* Mapped addresses of the rings. */
463 dma_addr_t tx_ring_dma;
464 #define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
465 dma_addr_t rx_ring_dma[RX_RING_SIZE];
466 struct descriptor *last_cmd; /* Last command sent. */
467 unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
468 spinlock_t lock; /* Group with Tx control cache line. */
469 u32 tx_threshold; /* The value for txdesc.count. */
470 struct RxFD *last_rxf; /* Last filled RX buffer. */
471 dma_addr_t last_rxf_dma;
472 unsigned int cur_rx, dirty_rx; /* The next free ring entry */
473 long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
474 struct net_device_stats stats;
475 struct speedo_stats *lstats;
476 dma_addr_t lstats_dma;
478 struct pci_dev *pdev;
479 struct timer_list timer; /* Media selection timer. */
480 struct speedo_mc_block *mc_setup_head; /* Multicast setup frame list head. */
481 struct speedo_mc_block *mc_setup_tail; /* Multicast setup frame list tail. */
482 long in_interrupt; /* Word-aligned dev->interrupt */
483 unsigned char acpi_pwr;
484 signed char rx_mode; /* Current PROMISC/ALLMULTI setting. */
485 unsigned int tx_full:1; /* The Tx queue is full. */
486 unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
487 unsigned int rx_bug:1; /* Work around receiver hang errata. */
488 unsigned char default_port:8; /* Last dev->if_port value. */
489 unsigned char rx_ring_state; /* RX ring status flags. */
490 unsigned short phy[2]; /* PHY media interfaces available. */
491 unsigned short partner; /* Link partner caps. */
492 struct mii_if_info mii_if; /* MII API hooks, info */
493 u32 msg_enable; /* debug message level */
499 /* The parameters for a CmdConfigure operation.
500 There are so many options that it would be difficult to document each bit.
501 We mostly use the default or recommended settings. */
502 static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
503 22, 0x08, 0, 0, 0, 0, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */
505 0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
507 static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
508 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
509 0, 0x2E, 0, 0x60, 0x08, 0x88,
510 0x68, 0, 0x40, 0xf2, 0x84, /* Disable FC */
513 /* PHY media interface chips. */
/* Name table indexed by the 4-bit PHY-type field taken from EEPROM word 6
 * (see `phys[(eeprom[6]>>8)&15]` in the probe code). */
514 static const char *phys[] = {
515 "None", "i82553-A/B", "i82553-C", "i82503",
516 "DP83840", "80c240", "80c24", "i82555",
517 "unknown-8", "unknown-9", "DP83840A", "unknown-11",
518 "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
519 enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
520 S80C24, I82555, DP83840A=10, };
/* Indexed by the first eight phy_chips values: nonzero if that chip uses MII. */
521 static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
/* Serial EEPROM READ opcode (binary 110). */
522 #define EE_READ_CMD (6)
524 static int eepro100_init_one(struct pci_dev *pdev,
525 const struct pci_device_id *ent);
526 static void eepro100_remove_one (struct pci_dev *pdev);
528 static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
529 static int mdio_read(struct net_device *dev, int phy_id, int location);
530 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
531 static int speedo_open(struct net_device *dev);
532 static void speedo_resume(struct net_device *dev);
533 static void speedo_timer(unsigned long data);
534 static void speedo_init_rx_ring(struct net_device *dev);
535 static void speedo_tx_timeout(struct net_device *dev);
536 static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
537 static void speedo_refill_rx_buffers(struct net_device *dev, int force);
538 static int speedo_rx(struct net_device *dev);
539 static void speedo_tx_buffer_gc(struct net_device *dev);
540 static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
541 static int speedo_close(struct net_device *dev);
542 static struct net_device_stats *speedo_get_stats(struct net_device *dev);
543 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
544 static void set_rx_mode(struct net_device *dev);
545 static void speedo_show_state(struct net_device *dev);
549 #ifdef honor_default_port
550 /* Optional driver feature to allow forcing the transceiver setting.
552 static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
553 0x2000, 0x2100, 0x0400, 0x3100};
556 /* How to wait for the command unit to accept a command.
557 Typically this takes 0 ticks. */
/* Spins until the SCB command byte reads back zero (the chip has latched
 * the previous command) or the poll budget runs out, then complains.
 * NOTE(review): this listing is missing the intervening source lines;
 * presumably `r` is the SCBCmd readback and `wait` the countdown --
 * confirm against the full source. */
558 static inline unsigned char wait_for_cmd_done(struct net_device *dev)
561 long cmd_ioaddr = dev->base_addr + SCBCmd;
567 } while(r && --wait >= 0);
570 printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
/* PCI probe entry point: record the pre-enable power state, enable the
 * device, reserve its PIO and MMIO resources, obtain a register base
 * (I/O port or ioremap'd MMIO), and hand off to speedo_found1().
 * NOTE(review): on pci_enable_device() failure this jumps to
 * err_out_free_mmio_region, which releases the MMIO and then the PIO
 * region even though neither has been requested yet at that point --
 * the goto should target a label past the release calls.  Confirm
 * against the full source before changing. */
574 static int __devinit eepro100_init_one (struct pci_dev *pdev,
575 const struct pci_device_id *ent)
577 unsigned long ioaddr;
579 int acpi_idle_state = 0, pm;
580 static int cards_found /* = 0 */;
583 /* when built-in, we only print version if device is found */
584 static int did_version;
585 if (did_version++ == 0)
589 /* save power state before pci_enable_device overwrites it */
590 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
593 pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
594 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
597 if (pci_enable_device(pdev))
598 goto err_out_free_mmio_region;
600 pci_set_master(pdev);
/* BAR 1 holds the I/O ports, BAR 0 the memory-mapped registers. */
602 if (!request_region(pci_resource_start(pdev, 1),
603 pci_resource_len(pdev, 1), "eepro100")) {
604 printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
607 if (!request_mem_region(pci_resource_start(pdev, 0),
608 pci_resource_len(pdev, 0), "eepro100")) {
609 printk (KERN_ERR "eepro100: cannot reserve MMIO region\n");
610 goto err_out_free_pio_region;
615 ioaddr = pci_resource_start(pdev, 1);
616 if (DEBUG & NETIF_MSG_PROBE)
617 printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
620 ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
621 pci_resource_len(pdev, 0));
623 printk (KERN_ERR "eepro100: cannot remap MMIO region %lx @ %lx\n",
624 pci_resource_len(pdev, 0), pci_resource_start(pdev, 0));
625 goto err_out_free_mmio_region;
627 if (DEBUG & NETIF_MSG_PROBE)
628 printk("Found Intel i82557 PCI Speedo, MMIO at %#lx, IRQ %d.\n",
629 pci_resource_start(pdev, 0), irq);
633 if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
636 goto err_out_iounmap;
/* Error unwind: release resources in reverse order of acquisition. */
642 iounmap ((void *)ioaddr);
644 err_out_free_mmio_region:
645 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
646 err_out_free_pio_region:
647 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
652 static int __devinit speedo_found1(struct pci_dev *pdev,
653 long ioaddr, int card_idx, int acpi_idle_state)
655 struct net_device *dev;
656 struct speedo_private *sp;
662 dma_addr_t tx_ring_dma;
664 size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
665 tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
666 if (tx_ring_space == NULL)
669 dev = init_etherdev(NULL, sizeof(struct speedo_private));
671 printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
672 pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
676 SET_MODULE_OWNER(dev);
678 if (dev->mem_start > 0)
679 option = dev->mem_start;
680 else if (card_idx >= 0 && options[card_idx] >= 0)
681 option = options[card_idx];
685 /* Read the station address EEPROM before doing the reset.
686 Nominally this should even be done before accepting the device, but
687 then we wouldn't have a device name with which to report the error.
688 The size test is for 6 bit vs. 8 bit address serial EEPROMs.
691 unsigned long iobase;
692 int read_cmd, ee_size;
696 /* Use IO only to avoid postponed writes and satisfy EEPROM timing
698 iobase = pci_resource_start(pdev, 1);
699 if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
702 read_cmd = EE_READ_CMD << 24;
705 read_cmd = EE_READ_CMD << 22;
708 for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
709 u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
713 dev->dev_addr[j++] = value;
714 dev->dev_addr[j++] = value >> 8;
718 printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
719 "check settings before activating this device!\n",
721 /* Don't unregister_netdev(dev); as the EEPro may actually be
722 usable, especially if the MAC address is set later.
723 On the other hand, it may be unusable if MDI data is corrupted. */
726 /* Reset the chip: stop Tx and Rx processes and clear counters.
727 This takes less than 10usec and will easily finish before the next
729 outl(PortReset, ioaddr + SCBPort);
730 inl(ioaddr + SCBPort);
733 if (eeprom[3] & 0x0100)
734 product = "OEM i82557/i82558 10/100 Ethernet";
736 product = pdev->name;
738 printk(KERN_INFO "%s: %s, ", dev->name, product);
740 for (i = 0; i < 5; i++)
741 printk("%2.2X:", dev->dev_addr[i]);
742 printk("%2.2X, ", dev->dev_addr[i]);
744 printk("I/O at %#3lx, ", ioaddr);
746 printk("IRQ %d.\n", pdev->irq);
748 /* we must initialize base_addr early, for mdio_{read,write} */
749 dev->base_addr = ioaddr;
751 #if 1 || defined(kernel_bloat)
752 /* OK, this is pure kernel bloat. I don't like it when other drivers
753 waste non-pageable kernel space to emit similar messages, but I need
754 them for bug reports. */
756 const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
757 /* The self-test results must be paragraph aligned. */
758 volatile s32 *self_test_results;
759 int boguscnt = 16000; /* Timeout for set-test. */
760 if ((eeprom[3] & 0x03) != 0x03)
761 printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
763 printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
764 " connectors present:",
765 eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
766 for (i = 0; i < 4; i++)
767 if (eeprom[5] & (1<<i))
768 printk(connectors[i]);
769 printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
770 phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
771 if (eeprom[7] & 0x0700)
772 printk(KERN_INFO " Secondary interface chip %s.\n",
773 phys[(eeprom[7]>>8)&7]);
774 if (((eeprom[6]>>8) & 0x3f) == DP83840
775 || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
776 int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
779 printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
781 mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
783 if ((option >= 0) && (option & 0x70)) {
784 printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
785 (option & 0x20 ? 100 : 10),
786 (option & 0x10 ? "full" : "half"));
787 mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
788 ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
789 ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
792 /* Perform a system self-test. */
793 self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
794 self_test_results[0] = 0;
795 self_test_results[1] = -1;
796 outl(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
799 } while (self_test_results[1] == -1 && --boguscnt >= 0);
801 if (boguscnt < 0) { /* Test optimized out. */
802 printk(KERN_ERR "Self test failed, status %8.8x:\n"
803 KERN_ERR " Failure to initialize the i82557.\n"
804 KERN_ERR " Verify that the card is a bus-master"
806 self_test_results[1]);
808 printk(KERN_INFO " General self-test: %s.\n"
809 KERN_INFO " Serial sub-system self-test: %s.\n"
810 KERN_INFO " Internal registers self-test: %s.\n"
811 KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
812 self_test_results[1] & 0x1000 ? "failed" : "passed",
813 self_test_results[1] & 0x0020 ? "failed" : "passed",
814 self_test_results[1] & 0x0008 ? "failed" : "passed",
815 self_test_results[1] & 0x0004 ? "failed" : "passed",
816 self_test_results[0]);
818 #endif /* kernel_bloat */
820 outl(PortReset, ioaddr + SCBPort);
821 inl(ioaddr + SCBPort);
824 /* Return the chip to its original power state. */
825 pci_set_power_state(pdev, acpi_idle_state);
827 pci_set_drvdata (pdev, dev);
829 dev->irq = pdev->irq;
833 sp->msg_enable = DEBUG;
834 sp->acpi_pwr = acpi_idle_state;
835 sp->tx_ring = tx_ring_space;
836 sp->tx_ring_dma = tx_ring_dma;
837 sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
838 sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
839 init_timer(&sp->timer); /* used in ioctl() */
841 sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
843 if (full_duplex[card_idx] >= 0)
844 sp->mii_if.full_duplex = full_duplex[card_idx];
846 sp->default_port = option >= 0 ? (option & 0x0f) : 0;
848 sp->phy[0] = eeprom[6];
849 sp->phy[1] = eeprom[7];
851 sp->mii_if.phy_id = eeprom[6] & 0x1f;
852 sp->mii_if.phy_id_mask = 0x1f;
853 sp->mii_if.reg_num_mask = 0x1f;
854 sp->mii_if.dev = dev;
855 sp->mii_if.mdio_read = mdio_read;
856 sp->mii_if.mdio_write = mdio_write;
858 sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
859 if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
860 || (pdev->device == 0x2449) || (pdev->device == 0x2459)
861 || (pdev->device == 0x245D)) {
866 printk(KERN_INFO " Receiver lock-up workaround activated.\n");
868 /* The Speedo-specific entries in the device structure. */
869 dev->open = &speedo_open;
870 dev->hard_start_xmit = &speedo_start_xmit;
871 netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
872 dev->stop = &speedo_close;
873 dev->get_stats = &speedo_get_stats;
874 dev->set_multicast_list = &set_rx_mode;
875 dev->do_ioctl = &speedo_ioctl;
/* Issue an SCB command that the chip may be slow to accept.  Polls the
 * SCBCmd byte (it reads back 0 once the chip has latched the previous
 * command) before writing, then polls again -- with a much larger
 * budget -- after writing, and logs an error if the command is never
 * accepted.  NOTE(review): loop headers/braces between the numbered
 * lines are missing from this listing; control flow partly inferred. */
880 static void do_slow_command(struct net_device *dev, int cmd)
882 long cmd_ioaddr = dev->base_addr + SCBCmd;
885 if (inb(cmd_ioaddr) == 0) break; /* previous command accepted */
886 while(++wait <= 200);
888 printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
889 inb(cmd_ioaddr), wait);
891 outb(cmd, cmd_ioaddr); /* issue the new command */
/* Typical case: accepted within a few polls. */
893 for (wait = 0; wait <= 100; wait++)
894 if (inb(cmd_ioaddr) == 0) return;
/* Slow case: keep polling up to 20000 iterations before complaining. */
895 for (; wait <= 20000; wait++)
896 if (inb(cmd_ioaddr) == 0) return;
898 printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
899 " Current status %8.8x.\n",
900 cmd, wait, inl(dev->base_addr + SCBStatus));
903 /* Serial EEPROM section.
904 A "bit" grungy, but we work our way through bit-by-bit :->. */
905 /* EEPROM_Ctrl bits. */
906 #define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
907 #define EE_CS 0x02 /* EEPROM chip select. */
908 #define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
909 #define EE_DATA_READ 0x08 /* EEPROM chip data out. */
910 #define EE_ENB (0x4800 | EE_CS) /* Chip select asserted; 0x4800 presumably enables EEPROM access -- confirm with the i82557 datasheet. */
911 #define EE_WRITE_0 0x4802 /* == EE_ENB: clock out a 0 data bit. */
912 #define EE_WRITE_1 0x4806 /* == EE_ENB | EE_DATA_WRITE: clock out a 1 data bit. */
913 #define EE_OFFSET SCBeeprom /* Register offset of the EEPROM control word. */
915 /* The fixes for the code were kindly provided by Dragan Stancevic
916 <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
918 The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
919 interval for serial EEPROM. However, it looks like that there is an
920 additional requirement dictating larger udelay's in the code below.
/*
 * do_eeprom_cmd - clock a command out to the serial EEPROM, MSB first,
 * and simultaneously clock the response bits back in.
 *
 * ioaddr:  base I/O address of the chip.
 * cmd:     command/address bits to shift out.
 * cmd_len: index of the most significant bit to send (counts down to 0).
 *
 * Returns the bits read back from EE_DATA_READ, accumulated into an int.
 * The udelay(2) after every edge follows the Intel EEPROM timing notes
 * quoted in the comment block above.
 *
 * NOTE(review): the listing omits the function's opening brace, the
 * "retval" declaration and the "do {" that pairs with the visible
 * "} while (--cmd_len >= 0);".
 */
922 static int __devinit do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
925 long ee_addr = ioaddr + SCBeeprom;
/* Raise chip select, then give one clock edge before shifting bits. */
927 io_outw(EE_ENB, ee_addr); udelay(2);
928 io_outw(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
930 /* Shift the command bits out. */
932 short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
933 io_outw(dataval, ee_addr); udelay(2);
934 io_outw(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
/* Sample the data-out pin on each clock to build up the reply. */
935 retval = (retval << 1) | ((io_inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
936 } while (--cmd_len >= 0);
937 io_outw(EE_ENB, ee_addr); udelay(2);
939 /* Terminate the EEPROM access. */
940 io_outw(EE_ENB & ~EE_CS, ee_addr);
/*
 * mdio_read - read one MII management register via the chip's MDI
 * control register.  0x08000000 is the MDI "read" opcode; the PHY id
 * and register number are packed into bits 21+ and 16+ respectively.
 * Completion is signalled by bit 28 (0x10000000) of SCBCtrlMDI; we
 * busy-wait for it with a bogus-count timeout rather than sleeping.
 *
 * NOTE(review): the opening brace, the "do {" that pairs with the
 * visible "} while", and the final return of the register value are
 * missing from this listing.
 */
944 static int mdio_read(struct net_device *dev, int phy_id, int location)
946 long ioaddr = dev->base_addr;
947 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
948 outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
950 val = inl(ioaddr + SCBCtrlMDI);
951 if (--boguscnt < 0) {
952 printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
/* Poll until the MDI "ready" bit (bit 28) is set. */
955 } while (! (val & 0x10000000));
/*
 * mdio_write - write one MII management register.  Mirrors mdio_read()
 * but with the 0x04000000 MDI "write" opcode and the 16-bit value in
 * the low bits.  Same bit-28 completion poll and timeout scheme.
 *
 * NOTE(review): opening brace and the "do {" pairing with the visible
 * "} while" are missing from this listing.
 */
959 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
961 long ioaddr = dev->base_addr;
962 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
963 outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
964 ioaddr + SCBCtrlMDI);
966 val = inl(ioaddr + SCBCtrlMDI);
967 if (--boguscnt < 0) {
968 printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
/* Poll until the MDI "ready" bit (bit 28) is set. */
971 } while (! (val & 0x10000000));
/*
 * speedo_open - the dev->open handler.  Powers the device up, installs
 * the (shared) interrupt handler, initializes the Rx ring and multicast
 * state, starts the queue, and arms the media-monitoring timer.
 *
 * NOTE(review): the listing is missing interior lines (return-type line,
 * braces, the request_irq failure path, the call to speedo_resume() and
 * the final "return 0;"), so the flow between the visible statements is
 * partly inferred — verify against the full source.
 */
975 speedo_open(struct net_device *dev)
977 struct speedo_private *sp = (struct speedo_private *)dev->priv;
978 long ioaddr = dev->base_addr;
981 if (netif_msg_ifup(sp))
982 printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
/* Wake the chip from D-state before touching it. */
984 pci_set_power_state(sp->pdev, 0);
986 /* Set up the Tx queue early.. */
991 spin_lock_init(&sp->lock);
992 sp->in_interrupt = 0;
994 /* .. we can safely take handler calls during init. */
/* SA_SHIRQ: the IRQ line may be shared with other devices. */
995 retval = request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev);
1000 dev->if_port = sp->default_port;
1002 #ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
1003 /* Retrigger negotiation to reset previous errors. */
1004 if ((sp->phy[0] & 0x8000) == 0) {
1005 int phy_addr = sp->phy[0] & 0x1f ;
1006 /* Use 0x3300 for restarting NWay, other values to force xcvr:
1012 #ifdef honor_default_port
1013 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1015 mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
1020 speedo_init_rx_ring(dev);
1022 /* Fire up the hardware. */
1023 outw(SCBMaskAll, ioaddr + SCBCmd);
1026 netdevice_start(dev);
1027 netif_start_queue(dev);
1029 /* Setup the chip and configure the multicast list. */
1030 sp->mc_setup_head = NULL;
1031 sp->mc_setup_tail = NULL;
1032 sp->flow_ctrl = sp->partner = 0;
1033 sp->rx_mode = -1; /* Invalid -> always reset the mode. */
/* phy[0] bit 15 set means "no MII transceiver present". */
1035 if ((sp->phy[0] & 0x8000) == 0)
1036 sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);
1038 mii_check_link(&sp->mii_if);
1040 if (netif_msg_ifup(sp)) {
1041 printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
1042 dev->name, inw(ioaddr + SCBStatus));
1045 /* Set the timer. The timer serves a dual purpose:
1046 1) to monitor the media interface (e.g. link beat) and perhaps switch
1047 to an alternate media type
1048 2) to monitor Rx activity, and restart the Rx process if the receiver
1050 sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
1051 sp->timer.data = (unsigned long)dev;
1052 sp->timer.function = &speedo_timer; /* timer handler */
1053 add_timer(&sp->timer);
1055 /* No need to wait for the command unit to accept here. */
/* A dummy MII read flushes the previous mdio transaction. */
1056 if ((sp->phy[0] & 0x8000) == 0)
1057 mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);
1062 /* Start the chip hardware after a full reset. */
/*
 * speedo_resume - program the chip from scratch: segment registers,
 * statistics block, Rx ring base, and an initial IASetup (individual
 * address setup) command in the Tx ring, then start the CU with
 * EarlyRx/FlowCtl interrupts masked.
 *
 * NOTE(review): braces and a few interior lines are missing from this
 * listing (e.g. the retry after PortPartialReset and the assignment of
 * ias_cmd, of which only the cast expression at "1112" is visible).
 */
1063 static void speedo_resume(struct net_device *dev)
1065 struct speedo_private *sp = dev->priv;
1066 long ioaddr = dev->base_addr;
1068 /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
1069 sp->tx_threshold = 0x01208000;
1071 /* Set the segment registers to '0'. */
1072 if (wait_for_cmd_done(dev) != 0) {
/* Previous command stalled: partially reset the chip to recover. */
1073 outl(PortPartialReset, ioaddr + SCBPort);
1077 outl(0, ioaddr + SCBPointer);
1078 inl(ioaddr + SCBPointer); /* Flush to PCI. */
1079 udelay(10); /* Bogus, but it avoids the bug. */
1081 /* Note: these next two operations can take a while. */
1082 do_slow_command(dev, RxAddrLoad);
1083 do_slow_command(dev, CUCmdBase);
1085 /* Load the statistics block and rx ring addresses. */
1086 outl(sp->lstats_dma, ioaddr + SCBPointer);
1087 inl(ioaddr + SCBPointer); /* Flush to PCI */
1089 outb(CUStatsAddr, ioaddr + SCBCmd);
1090 sp->lstats->done_marker = 0;
1091 wait_for_cmd_done(dev);
1093 if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
1094 if (netif_msg_rx_err(sp))
1095 printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
1098 outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1099 ioaddr + SCBPointer);
1100 inl(ioaddr + SCBPointer); /* Flush to PCI */
1103 /* Note: RxStart should complete instantly. */
1104 do_slow_command(dev, RxStart);
1105 do_slow_command(dev, CUDumpStats);
1107 /* Fill the first command with our physical address. */
1109 struct descriptor *ias_cmd;
1112 (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
1113 /* Avoid a bug(?!) here by marking the command already completed. */
1114 ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
1116 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
/* 6 = ETH_ALEN; loads the station MAC address into the chip. */
1117 memcpy(ias_cmd->params, dev->dev_addr, 6);
1119 clear_suspend(sp->last_cmd);
1120 sp->last_cmd = ias_cmd;
1123 /* Start the chip's Tx process and unmask interrupts. */
1124 outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
1125 ioaddr + SCBPointer);
1126 /* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
1127 remain masked --Dragan */
1128 outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
1132 * Sometimes the receiver stops making progress. This routine knows how to
1133 * get it going again, without losing packets or being otherwise nasty like
1134 * a chip reset would be. Previously the driver had a whole sequence
1135 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
1136 * do another, etc. But those things don't really matter. Separate logic
1137 * in the ISR provides for allocating buffers--the other half of operation
1138 * is just making sure the receiver is active. speedo_rx_soft_reset does that.
1139 * The problem with the old, more involved algorithm shows up under
1140 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
/*
 * speedo_rx_soft_reset - restart a stalled receiver without a full chip
 * reset: abort the RU, poison the current RFD's rx_buf_addr, then point
 * the RU back at the current ring position and issue RxStart.  See the
 * comment block above for why this replaced the old state machine.
 *
 * NOTE(review): braces, the "rfd" declaration line and early-return
 * lines after the stall printks are missing from this listing.
 */
1143 speedo_rx_soft_reset(struct net_device *dev)
1145 struct speedo_private *sp = dev->priv;
1149 ioaddr = dev->base_addr;
1150 if (wait_for_cmd_done(dev) != 0) {
1151 printk("%s: previous command stalled\n", dev->name);
1155 * Put the hardware into a known state.
1157 outb(RxAbort, ioaddr + SCBCmd);
1159 rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
/* This field is unused by the i82557; 0xffffffff marks it invalid. */
1161 rfd->rx_buf_addr = 0xffffffff;
1163 if (wait_for_cmd_done(dev) != 0) {
1164 printk("%s: RxAbort command stalled\n", dev->name);
1167 outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1168 ioaddr + SCBPointer);
1169 outb(RxStart, ioaddr + SCBCmd);
1173 /* Media monitoring and control. */
/*
 * speedo_timer - periodic (2 s) timer: tracks MII link-partner changes
 * (re-deriving flow-control state), checks link status, and kicks the
 * rx_bug workaround (a multicast-list set command) if no packet has
 * been received for 2 s.  Re-arms itself at the end.
 *
 * NOTE(review): braces and the set_rx_mode(dev) call implied by the
 * "Sending a multicast list set command" message are missing from this
 * listing.
 */
1174 static void speedo_timer(unsigned long data)
1176 struct net_device *dev = (struct net_device *)data;
1177 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1178 long ioaddr = dev->base_addr;
1179 int phy_num = sp->phy[0] & 0x1f;
1181 /* We have MII and lost link beat. */
1182 if ((sp->phy[0] & 0x8000) == 0) {
1183 int partner = mdio_read(dev, phy_num, MII_LPA);
1184 if (partner != sp->partner) {
/* Bit 0x0400 in both advertise and LPA -> pause (flow control). */
1185 int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
1186 if (netif_msg_link(sp)) {
1187 printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
1188 printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
1189 dev->name, sp->partner, partner, sp->mii_if.advertising);
1191 sp->partner = partner;
1192 if (flow_ctrl != sp->flow_ctrl) {
1193 sp->flow_ctrl = flow_ctrl;
1194 sp->rx_mode = -1; /* Trigger a reload. */
1198 mii_check_link(&sp->mii_if);
1199 if (netif_msg_timer(sp)) {
1200 printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
1201 dev->name, inw(ioaddr + SCBStatus));
1203 if (sp->rx_mode < 0 ||
1204 (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
1205 /* We haven't received a packet in a Long Time. We might have been
1206 bitten by the receiver hang bug. This can be cleared by sending
1207 a set multicast list command. */
1208 if (netif_msg_timer(sp))
1209 printk(KERN_DEBUG "%s: Sending a multicast list set command"
1210 " from a timer routine,"
1211 " m=%d, j=%ld, l=%ld.\n",
1212 dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
1215 /* We must continue to monitor the media. */
1216 sp->timer.expires = RUN_AT(2*HZ); /* 2.0 sec. */
1217 add_timer(&sp->timer);
1218 #if defined(timer_exit)
1219 timer_exit(&sp->timer);
/*
 * speedo_show_state - debugging dump: prints the Tx and Rx ring
 * contents (gated on netif_msg_pktdata) and, in the tail section, the
 * first 16 MII registers of the primary PHY.  '*' marks the dirty
 * index, '=' the current index, 'l' the last RxFD.
 *
 * NOTE(review): braces, the "int i;" declaration and the #ifdef that
 * presumably guards the MII-register dump (note the second ioaddr/
 * phy_num declarations at "1251"/"1252") are missing from this listing.
 */
1223 static void speedo_show_state(struct net_device *dev)
1225 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1228 if (netif_msg_pktdata(sp)) {
1229 printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n",
1230 dev->name, sp->cur_tx, sp->dirty_tx);
1231 for (i = 0; i < TX_RING_SIZE; i++)
1232 printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
1233 i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
1234 i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
1235 i, sp->tx_ring[i].status);
1237 printk(KERN_DEBUG "%s: Printing Rx ring"
1238 " (next to receive into %u, dirty index %u).\n",
1239 dev->name, sp->cur_rx, sp->dirty_rx);
1240 for (i = 0; i < RX_RING_SIZE; i++)
1241 printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
1242 sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
1243 i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
1244 i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
1245 i, (sp->rx_ringp[i] != NULL) ?
1246 (unsigned)sp->rx_ringp[i]->status : 0);
1251 long ioaddr = dev->base_addr;
1252 int phy_num = sp->phy[0] & 0x1f;
1253 for (i = 0; i < 16; i++) {
1254 /* FIXME: what does it mean? --SAW */
1256 printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n",
1257 dev->name, phy_num, i, mdio_read(dev, phy_num, i));
1264 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/*
 * speedo_init_rx_ring - allocate one skb per Rx ring slot; each skb's
 * headroom holds the RxFD descriptor itself (sizeof(struct RxFD) is
 * reserved off the front).  Descriptors are chained via their DMA
 * addresses and the final one is marked end-of-list (0xC0000002:
 * EL + suspend bits).  Each descriptor is flushed to the device with
 * pci_dma_sync_single after it is written.
 *
 * NOTE(review): braces, the "int i;" declaration, the NULL check after
 * dev_alloc_skb (only the "break" at "1282" is visible) and the
 * "last_rxf = rxf;" update are missing from this listing.
 */
1266 speedo_init_rx_ring(struct net_device *dev)
1268 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1269 struct RxFD *rxf, *last_rxf = NULL;
1270 dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
1275 for (i = 0; i < RX_RING_SIZE; i++) {
1276 struct sk_buff *skb;
1277 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1278 /* XXX: do we really want to call this before the NULL check? --hch */
1279 rx_align(skb); /* Align IP on 16 byte boundary */
1280 sp->rx_skbuff[i] = skb;
1282 break; /* OK. Just initially short of Rx bufs. */
1283 skb->dev = dev; /* Mark as being used by this device. */
1284 rxf = (struct RxFD *)skb->tail;
1285 sp->rx_ringp[i] = rxf;
1286 sp->rx_ring_dma[i] =
1287 pci_map_single(sp->pdev, rxf,
1288 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
1289 skb_reserve(skb, sizeof(struct RxFD));
/* Chain the previous descriptor to this one and flush it out. */
1291 last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
1292 pci_dma_sync_single(sp->pdev, last_rxf_dma,
1293 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1296 last_rxf_dma = sp->rx_ring_dma[i];
1297 rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
1298 rxf->link = 0; /* None yet. */
1299 /* This field unused by i82557. */
1300 rxf->rx_buf_addr = 0xffffffff;
1301 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1302 pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[i],
1303 sizeof(struct RxFD), PCI_DMA_TODEVICE);
/* dirty_rx lags cur_rx by however many slots failed to allocate. */
1305 sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1306 /* Mark the last entry as end-of-list. */
1307 last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
1308 pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
1309 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1310 sp->last_rxf = last_rxf;
1311 sp->last_rxf_dma = last_rxf_dma;
/*
 * speedo_purge_tx - drop every pending Tx buffer and queued multicast
 * setup frame, counting each dropped skb as a tx_error.  Used from the
 * Tx-timeout path to recover memory before reprogramming the chip.
 * Caller context: interrupt-safe skb freeing (dev_kfree_skb_irq).
 *
 * NOTE(review): braces, the "int entry;" declaration and the
 * "sp->dirty_tx++" that must advance the visible while-loop are missing
 * from this listing.
 */
1314 static void speedo_purge_tx(struct net_device *dev)
1316 struct speedo_private *sp = (struct speedo_private *)dev->priv;
/* Walk all not-yet-reclaimed Tx slots (cur_tx is ahead of dirty_tx). */
1319 while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
1320 entry = sp->dirty_tx % TX_RING_SIZE;
1321 if (sp->tx_skbuff[entry]) {
1322 sp->stats.tx_errors++;
1323 pci_unmap_single(sp->pdev,
1324 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1325 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1326 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1327 sp->tx_skbuff[entry] = 0;
/* Free the whole queued multicast-setup frame list. */
1331 while (sp->mc_setup_head != NULL) {
1332 struct speedo_mc_block *t;
1333 if (netif_msg_tx_err(sp))
1334 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1335 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1336 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1337 t = sp->mc_setup_head->next;
1338 kfree(sp->mc_setup_head);
1339 sp->mc_setup_head = t;
1341 sp->mc_setup_tail = NULL;
1343 netif_wake_queue(dev);
/*
 * reset_mii - reset the MII transceiver, preserving and restoring the
 * BMCR and advertising registers around the reset (0x8000 in BMCR is
 * the reset bit; 0x0400 isolates the PHY first).  Skipped entirely
 * when no MII transceiver is present (phy[0] bit 15 set).
 *
 * NOTE(review): braces, the udelay between isolate and reset, and the
 * #else/#endif pairing with the visible #ifdef honor_default_port are
 * missing from this listing.
 */
1346 static void reset_mii(struct net_device *dev)
1348 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1350 /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
1351 if ((sp->phy[0] & 0x8000) == 0) {
1352 int phy_addr = sp->phy[0] & 0x1f;
/* Save current negotiation state so it can be restored afterwards. */
1353 int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
1354 int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
1355 mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
1356 mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
1357 mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
1358 mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
1359 #ifdef honor_default_port
1360 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
/* Dummy read flushes the reset before the saved state is restored. */
1362 mdio_read(dev, phy_addr, MII_BMCR);
1363 mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
1364 mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
/*
 * speedo_tx_timeout - watchdog handler for a stuck transmitter.  Dumps
 * state, then performs a full recovery: stop the timer, PortReset the
 * chip, mask interrupts, garbage-collect and purge the Tx ring, refill
 * Rx buffers, and (under the lock) restart the chip before re-arming
 * the media timer.
 *
 * NOTE(review): this listing is missing several interior lines,
 * including what appears (from the dangling else-less structure) to be
 * conditional-compilation guards around the "restart the transmitter"
 * branch, the speedo_resume() call inside the locked region, and the
 * reset_mii() call implied by the comment at "1421"-"1422".
 */
1369 static void speedo_tx_timeout(struct net_device *dev)
1371 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1372 long ioaddr = dev->base_addr;
1373 int status = inw(ioaddr + SCBStatus);
1374 unsigned long flags;
1376 if (netif_msg_tx_err(sp)) {
1377 printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
1378 " %4.4x at %d/%d command %8.8x.\n",
1379 dev->name, status, inw(ioaddr + SCBCmd),
1380 sp->dirty_tx, sp->cur_tx,
1381 sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
1384 speedo_show_state(dev);
1386 if ((status & 0x00C0) != 0x0080
1387 && (status & 0x003C) == 0x0010) {
1388 /* Only the command unit has stopped. */
1389 printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
/* NOTE(review): the next line is malformed as written — stray ']' and
   bare "dirty_tx" (no "sp->").  In the upstream driver this statement
   sits inside disabled conditional compilation, whose guard lines are
   among those missing from this listing — confirm against full source. */
1391 outl(TX_RING_ELEM_DMA(sp, dirty_tx % TX_RING_SIZE]),
1392 ioaddr + SCBPointer);
1393 outw(CUStart, ioaddr + SCBCmd);
1399 del_timer_sync(&sp->timer);
1400 /* Reset the Tx and Rx units. */
1401 outl(PortReset, ioaddr + SCBPort);
1402 /* We may get spurious interrupts here. But I don't think that they
1403 may do much harm. 1999/12/09 SAW */
1405 /* Disable interrupts. */
1406 outw(SCBMaskAll, ioaddr + SCBCmd);
1408 speedo_tx_buffer_gc(dev);
1409 /* Free as much as possible.
1410 It helps to recover from a hang because of out-of-memory.
1411 It also simplifies speedo_resume() in case TX ring is full or
1412 close-to-be full. */
1413 speedo_purge_tx(dev);
1414 speedo_refill_rx_buffers(dev, 1);
1415 spin_lock_irqsave(&sp->lock, flags);
1418 dev->trans_start = jiffies;
1419 spin_unlock_irqrestore(&sp->lock, flags);
1420 set_rx_mode(dev); /* it takes the spinlock itself --SAW */
1421 /* Reset MII transceiver. Do it before starting the timer to serialize
1422 mdio_xxx operations. Yes, it's a paranoya :-) 2000/05/09 SAW */
1424 sp->timer.expires = RUN_AT(2*HZ);
1425 add_timer(&sp->timer);
/*
 * speedo_start_xmit - the hard_start_xmit handler.  Claims a Tx ring
 * slot under sp->lock, builds a CmdTx/CmdTxFlex descriptor around the
 * skb's DMA mapping, clears the suspend bit on the previous command and
 * issues CUResume.  Requests a Tx interrupt only on every
 * (TX_RING_SIZE/4)th slot to limit interrupt load, and stops the queue
 * when fewer than the slots reserved for set_rx_mode() remain.
 *
 * NOTE(review): braces, the "int entry;" declaration and the return
 * statements (including the busy-return after netif_stop_queue) are
 * missing from this listing.
 */
1431 speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
1433 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1434 long ioaddr = dev->base_addr;
1437 /* Prevent interrupts from changing the Tx ring from underneath us. */
1438 unsigned long flags;
1440 spin_lock_irqsave(&sp->lock, flags);
1442 /* Check if there are enough space. */
1443 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1444 printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
1445 netif_stop_queue(dev);
1447 spin_unlock_irqrestore(&sp->lock, flags);
1451 /* Calculate the Tx descriptor entry. */
1452 entry = sp->cur_tx++ % TX_RING_SIZE;
1454 sp->tx_skbuff[entry] = skb;
1455 sp->tx_ring[entry].status =
1456 cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
/* Interrupt only every TX_RING_SIZE/4 packets to cut interrupt load. */
1457 if (!(entry & ((TX_RING_SIZE>>2)-1)))
1458 sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
1459 sp->tx_ring[entry].link =
1460 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1461 sp->tx_ring[entry].tx_desc_addr =
1462 cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
1463 /* The data region is always in one buffer descriptor. */
1464 sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
1465 sp->tx_ring[entry].tx_buf_addr0 =
1466 cpu_to_le32(pci_map_single(sp->pdev, skb->data,
1467 skb->len, PCI_DMA_TODEVICE));
1468 sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
1470 /* workaround for hardware bug on 10 mbit half duplex */
/* A NOP command (0) kicks the i82557 (chip_id == 1) out of the hang. */
1472 if ((sp->partner == 0) && (sp->chip_id == 1)) {
1473 wait_for_cmd_done(dev);
1474 outb(0 , ioaddr + SCBCmd);
1478 /* Trigger the command unit resume. */
1479 wait_for_cmd_done(dev);
1480 clear_suspend(sp->last_cmd);
1481 /* We want the time window between clearing suspend flag on the previous
1482 command and resuming CU to be as small as possible.
1483 Interrupts in between are very undesired. --SAW */
1484 outb(CUResume, ioaddr + SCBCmd);
1485 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
1487 /* Leave room for set_rx_mode(). If there is no more space than reserved
1488 for multicast filter mark the ring as full. */
1489 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1490 netif_stop_queue(dev);
1494 spin_unlock_irqrestore(&sp->lock, flags);
1496 dev->trans_start = jiffies;
/*
 * speedo_tx_buffer_gc - reclaim completed Tx descriptors: for each slot
 * with StatusComplete set, bump the tx stats, unmap and free the skb.
 * On TxUnderrun the Tx threshold is raised (up to 0x01e08000) so the
 * chip starts transmitting later in the FIFO fill.  Also frees
 * multicast-setup frames whose ring position has been passed.  Caller
 * must hold sp->lock (called from the interrupt handler and timeout
 * path).
 *
 * NOTE(review): braces and the "dirty_tx++" advancing the main loop are
 * missing from this listing.
 */
1501 static void speedo_tx_buffer_gc(struct net_device *dev)
1503 unsigned int dirty_tx;
1504 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1506 dirty_tx = sp->dirty_tx;
1507 while ((int)(sp->cur_tx - dirty_tx) > 0) {
1508 int entry = dirty_tx % TX_RING_SIZE;
1509 int status = le32_to_cpu(sp->tx_ring[entry].status);
1511 if (netif_msg_tx_done(sp))
1512 printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
1514 if ((status & StatusComplete) == 0)
1515 break; /* It still hasn't been processed. */
1516 if (status & TxUnderrun)
1517 if (sp->tx_threshold < 0x01e08000) {
1518 if (netif_msg_tx_err(sp))
1519 printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
1521 sp->tx_threshold += 0x00040000;
1523 /* Free the original skb. */
1524 if (sp->tx_skbuff[entry]) {
1525 sp->stats.tx_packets++; /* Count only user packets. */
1526 sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
1527 pci_unmap_single(sp->pdev,
1528 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1529 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1530 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1531 sp->tx_skbuff[entry] = 0;
/* Sanity check: the reclaim pointer should never lag a full ring. */
1536 if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
1537 printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
1539 dirty_tx, sp->cur_tx, sp->tx_full);
1540 dirty_tx += TX_RING_SIZE;
/* Free multicast frames already consumed by the command unit. */
1543 while (sp->mc_setup_head != NULL
1544 && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
1545 struct speedo_mc_block *t;
1546 if (netif_msg_tx_err(sp))
1547 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1548 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1549 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1550 t = sp->mc_setup_head->next;
1551 kfree(sp->mc_setup_head);
1552 sp->mc_setup_head = t;
1554 if (sp->mc_setup_head == NULL)
1555 sp->mc_setup_tail = NULL;
1557 sp->dirty_tx = dirty_tx;
1560 /* The interrupt handler does all of the Rx thread work and cleans up
1561 after the Tx thread. */
/*
 * speedo_interrupt - main ISR.  Loops acking and servicing interrupt
 * sources until none remain or max_interrupt_work is exhausted:
 * 0x5000 -> Rx work (speedo_rx + buffer refill), RU state machine
 * resurrection via speedo_rx_soft_reset, and 0xA400 -> Tx reclaim and
 * possible queue wake.  A debug-build test_and_set_bit guard detects
 * re-entry on SMP.
 *
 * NOTE(review): braces, the outer "do {" pairing with the loop exit
 * conditions, the speedo_rx() call gated by the 0x5000 test, and the
 * early return in the re-entry path are missing from this listing.
 */
1562 static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1564 struct net_device *dev = (struct net_device *)dev_instance;
1565 struct speedo_private *sp;
1566 long ioaddr, boguscnt = max_interrupt_work;
1567 unsigned short status;
1569 ioaddr = dev->base_addr;
1570 sp = (struct speedo_private *)dev->priv;
1572 #ifndef final_version
1573 /* A lock to prevent simultaneous entry on SMP machines. */
1574 if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1575 printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
1577 sp->in_interrupt = 0; /* Avoid halting machine. */
1583 status = inw(ioaddr + SCBStatus);
1584 /* Acknowledge all of the current interrupt sources ASAP. */
1585 /* Will change from 0xfc00 to 0xff00 when we start handling
1586 FCP and ER interrupts --Dragan */
1587 outw(status & 0xfc00, ioaddr + SCBStatus);
1589 if (netif_msg_intr(sp))
1590 printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
/* No pending sources left: leave the service loop. */
1593 if ((status & 0xfc00) == 0)
1597 if ((status & 0x5000) || /* Packet received, or Rx error. */
1598 (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1599 /* Need to gather the postponed packet. */
1602 /* Always check if all rx buffers are allocated. --SAW */
1603 speedo_refill_rx_buffers(dev, 0);
1605 spin_lock(&sp->lock);
1607 * The chip may have suspended reception for various reasons.
1608 * Check for that, and re-prime it should this be the case.
/* Bits 5:2 of the status word hold the RU (receive unit) state. */
1610 switch ((status >> 2) & 0xf) {
1613 case 1: /* Suspended */
1614 case 2: /* No resources (RxFDs) */
1615 case 9: /* Suspended with no more RBDs */
1616 case 10: /* No resources due to no RBDs */
1617 case 12: /* Ready with no RBDs */
1618 speedo_rx_soft_reset(dev);
1620 case 3: case 5: case 6: case 7: case 8:
1621 case 11: case 13: case 14: case 15:
1622 /* these are all reserved values */
1627 /* User interrupt, Command/Tx unit interrupt or CU not active. */
1628 if (status & 0xA400) {
1629 speedo_tx_buffer_gc(dev);
1631 && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1632 /* The ring is no longer full. */
1634 netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
1638 spin_unlock(&sp->lock);
1640 if (--boguscnt < 0) {
1641 printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1643 /* Clear all interrupt sources. */
1644 /* Will change from 0xfc00 to 0xff00 when we start handling
1645 FCP and ER interrupts --Dragan */
1646 outw(0xfc00, ioaddr + SCBStatus);
1651 if (netif_msg_intr(sp))
1652 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1653 dev->name, inw(ioaddr + SCBStatus));
1655 clear_bit(0, (void*)&sp->in_interrupt);
/*
 * speedo_rx_alloc - allocate and DMA-map a fresh Rx skb for ring slot
 * 'entry'.  As in speedo_init_rx_ring(), the RxFD descriptor lives in
 * the skb's headroom (reserved off the front).  Returns the new RxFD,
 * or records NULL in rx_ringp on allocation failure.
 *
 * NOTE(review): braces, the "struct RxFD *rxf;" declaration, the NULL
 * check on the skb, the "skb->dev = dev" line and the "return rxf;"
 * are missing from this listing.
 */
1659 static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1661 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1663 struct sk_buff *skb;
1664 /* Get a fresh skbuff to replace the consumed one. */
1665 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1666 /* XXX: do we really want to call this before the NULL check? --hch */
1667 rx_align(skb); /* Align IP on 16 byte boundary */
1668 sp->rx_skbuff[entry] = skb;
1670 sp->rx_ringp[entry] = NULL;
1673 rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
1674 sp->rx_ring_dma[entry] =
1675 pci_map_single(sp->pdev, rxf,
1676 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1678 skb_reserve(skb, sizeof(struct RxFD));
/* Field unused by the i82557; 0xffffffff marks it invalid. */
1679 rxf->rx_buf_addr = 0xffffffff;
1680 pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
1681 sizeof(struct RxFD), PCI_DMA_TODEVICE);
/*
 * speedo_rx_link - append a freshly prepared RxFD to the tail of the
 * hardware Rx list: initialize its status (EL+S set, 0xC0000001),
 * link the previous tail descriptor to it, clear the previous tail's
 * EL/S bits, and flush the previous tail to the device.
 *
 * NOTE(review): braces and the "sp->last_rxf = rxf;" update implied by
 * the visible "sp->last_rxf_dma = rxf_dma;" are missing from this
 * listing.
 */
1685 static inline void speedo_rx_link(struct net_device *dev, int entry,
1686 struct RxFD *rxf, dma_addr_t rxf_dma)
1688 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1689 rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
1690 rxf->link = 0; /* None yet. */
1691 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1692 sp->last_rxf->link = cpu_to_le32(rxf_dma);
/* Clear EL+S on the old tail so the RU can proceed past it. */
1693 sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
1694 pci_dma_sync_single(sp->pdev, sp->last_rxf_dma,
1695 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1697 sp->last_rxf_dma = rxf_dma;
/*
 * speedo_refill_rx_buf - ensure the dirty_rx ring slot has a buffer and
 * link it into the hardware list.  On allocation failure: if 'force'
 * is clear, give up (return -1, better luck next time); if set, borrow
 * the skb from the next populated slot so the ring can keep moving.
 * Returns 0 on success, -1 when no buffer could be provided.
 *
 * NOTE(review): braces, declarations of rxf/entry/forw/forw_entry, the
 * dirty_rx++ advance and the return statements are missing from this
 * listing, as is the "force" test guarding the borrow path.
 */
1700 static int speedo_refill_rx_buf(struct net_device *dev, int force)
1702 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1706 entry = sp->dirty_rx % RX_RING_SIZE;
1707 if (sp->rx_skbuff[entry] == NULL) {
1708 rxf = speedo_rx_alloc(dev, entry);
/* Report OOM only once until the condition clears (RrOOMReported). */
1712 if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
1713 printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
1715 sp->rx_ring_state |= RrOOMReported;
1717 speedo_show_state(dev);
1719 return -1; /* Better luck next time! */
1720 /* Borrow an skb from one of next entries. */
1721 for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
1722 if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
1724 if (forw == sp->cur_rx)
1726 forw_entry = forw % RX_RING_SIZE;
1727 sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
1728 sp->rx_skbuff[forw_entry] = NULL;
1729 rxf = sp->rx_ringp[forw_entry];
1730 sp->rx_ringp[forw_entry] = NULL;
1731 sp->rx_ringp[entry] = rxf;
1734 rxf = sp->rx_ringp[entry];
1736 speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
1738 sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
/*
 * speedo_refill_rx_buffers - refill every consumed Rx slot (all slots
 * between dirty_rx and cur_rx), stopping early if speedo_refill_rx_buf
 * reports failure (-1).  'force' is passed through to enable the
 * skb-borrowing fallback.
 */
1742 static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1744 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1746 /* Refill the RX ring. */
1747 while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
1748 speedo_refill_rx_buf(dev, force) != -1);
/*
 * speedo_rx - receive loop.  Walks the ring while descriptors are
 * populated, syncing each RxFD from the device before reading status
 * and length.  Small packets (< rx_copybreak) are copied into a fresh
 * skb so the large ring buffer can be reused; large ones are passed up
 * directly and their ring slot is unmapped.  A packet landing in the
 * very last allocated RFD is postponed (RrPostponed) to dodge an OOM
 * corner case.  Refills one buffer per consumed packet, then tries a
 * forced refill at the end.
 *
 * NOTE(review): braces, the status/pkt_len/alloc_ok declarations, the
 * netif_rx() hand-off, and the loop-exit/continue framing around the
 * visible tests are missing from this listing.
 */
1752 speedo_rx(struct net_device *dev)
1754 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1755 int entry = sp->cur_rx % RX_RING_SIZE;
1756 int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
1760 if (netif_msg_intr(sp))
1761 printk(KERN_DEBUG " In speedo_rx().\n");
1762 /* If we own the next entry, it's a new packet. Send it up. */
1763 while (sp->rx_ringp[entry] != NULL) {
/* Pull the descriptor (not yet the payload) back from the device. */
1767 pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
1768 sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1769 status = le32_to_cpu(sp->rx_ringp[entry]->status);
1770 pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1772 if (!(status & RxComplete))
1775 if (--rx_work_limit < 0)
1778 /* Check for a rare out-of-memory case: the current buffer is
1779 the last buffer allocated in the RX ring. --SAW */
1780 if (sp->last_rxf == sp->rx_ringp[entry]) {
1781 /* Postpone the packet. It'll be reaped at an interrupt when this
1782 packet is no longer the last packet in the ring. */
1783 if (netif_msg_rx_err(sp))
1784 printk(KERN_DEBUG "%s: RX packet postponed!\n",
1786 sp->rx_ring_state |= RrPostponed;
1790 if (netif_msg_rx_status(sp))
1791 printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
1793 if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1794 if (status & RxErrTooBig)
1795 printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1796 "status %8.8x!\n", dev->name, status);
1797 else if (! (status & RxOK)) {
1798 /* There was a fatal error. This *should* be impossible. */
1799 sp->stats.rx_errors++;
1800 printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1805 struct sk_buff *skb;
1807 /* Check if the packet is long enough to just accept without
1808 copying to a properly sized skbuff. */
1809 if (pkt_len < rx_copybreak
1810 && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
1812 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1813 /* 'skb_put()' points to the start of sk_buff data area. */
1814 pci_dma_sync_single(sp->pdev, sp->rx_ring_dma[entry],
1815 sizeof(struct RxFD) + pkt_len, PCI_DMA_FROMDEVICE);
1817 #if 1 || USE_IP_CSUM
1818 /* Packet is in one chunk -- we can copy + cksum. */
1819 eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
1820 skb_put(skb, pkt_len);
1822 memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
1827 /* Pass up the already-filled skbuff. */
1828 skb = sp->rx_skbuff[entry];
1830 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1834 sp->rx_skbuff[entry] = NULL;
1835 skb_put(skb, pkt_len);
1837 sp->rx_ringp[entry] = NULL;
1838 pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
1839 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1841 skb->protocol = eth_type_trans(skb, dev);
1843 sp->stats.rx_packets++;
1844 sp->stats.rx_bytes += pkt_len;
1846 entry = (++sp->cur_rx) % RX_RING_SIZE;
1847 sp->rx_ring_state &= ~RrPostponed;
1848 /* Refill the recently taken buffers.
1849 Do it one-by-one to handle traffic bursts better. */
1850 if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
1854 /* Try hard to refill the recently taken buffers. */
1855 speedo_refill_rx_buffers(dev, 1);
1858 sp->last_rx_time = jiffies;
/*
 * speedo_close - the dev->stop handler.  Stops the queue and timer,
 * masks interrupts, partially resets the chip to kill flow control,
 * frees the IRQ, then unmaps and frees every Rx/Tx skb and any queued
 * multicast-setup frames before dropping the device to PCI power
 * state D2.
 *
 * NOTE(review): braces, the "int i;" declaration, the udelay(10) after
 * the "10 microsecond quiet period" comment, the per-skb NULL checks
 * inside the free loops, and "return 0;" are missing from this listing.
 */
1864 speedo_close(struct net_device *dev)
1866 long ioaddr = dev->base_addr;
1867 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1870 netdevice_stop(dev);
1871 netif_stop_queue(dev);
1873 if (netif_msg_ifdown(sp))
1874 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1875 dev->name, inw(ioaddr + SCBStatus));
1877 /* Shut off the media monitoring timer. */
1878 del_timer_sync(&sp->timer);
1880 outw(SCBMaskAll, ioaddr + SCBCmd);
1882 /* Shutting down the chip nicely fails to disable flow control. So.. */
1883 outl(PortPartialReset, ioaddr + SCBPort);
1884 inl(ioaddr + SCBPort); /* flush posted write */
1886 * The chip requires a 10 microsecond quiet period. Wait here!
1890 free_irq(dev->irq, dev);
1891 speedo_show_state(dev);
1893 /* Free all the skbuffs in the Rx and Tx queues. */
1894 for (i = 0; i < RX_RING_SIZE; i++) {
1895 struct sk_buff *skb = sp->rx_skbuff[i];
1896 sp->rx_skbuff[i] = 0;
1897 /* Clear the Rx descriptors. */
1899 pci_unmap_single(sp->pdev,
1901 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1906 for (i = 0; i < TX_RING_SIZE; i++) {
1907 struct sk_buff *skb = sp->tx_skbuff[i];
1908 sp->tx_skbuff[i] = 0;
1909 /* Clear the Tx descriptors. */
1911 pci_unmap_single(sp->pdev,
1912 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
1913 skb->len, PCI_DMA_TODEVICE);
1918 /* Free multicast setting blocks. */
1919 for (i = 0; sp->mc_setup_head != NULL; i++) {
1920 struct speedo_mc_block *t;
1921 t = sp->mc_setup_head->next;
1922 kfree(sp->mc_setup_head);
1923 sp->mc_setup_head = t;
1925 sp->mc_setup_tail = NULL;
1926 if (netif_msg_ifdown(sp))
1927 printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
/* Power down to D2 until the interface is opened again. */
1929 pci_set_power_state(sp->pdev, 2);
1934 /* The Speedo-3 has an especially awkward and unusable method of getting
1935 statistics out of the chip. It takes an unpredictable length of time
1936 for the dump-stats command to complete. To avoid a busy-wait loop we
1937 update the stats with the previous dump results, and then trigger a
1940 Oh, and incoming frames are dropped while executing dump-stats!
/*
 * speedo_get_stats - fold the previous CUDumpStats results into
 * sp->stats (only when the chip has written the 0xA007 done marker),
 * then kick off a new dump for next time.  The lock makes
 * wait_for_cmd_done + the command write atomic against the ISR.
 *
 * NOTE(review): the braces and the "return &sp->stats;" tail are
 * missing from this listing.  Also note the done-marker comparison uses
 * le32_to_cpu(0xA007) where cpu_to_le32(0xA007) would read more
 * naturally; the two perform the same byte swap, so behavior is the
 * same — cosmetic only, verify against full source before changing.
 */
1942 static struct net_device_stats *
1943 speedo_get_stats(struct net_device *dev)
1945 struct speedo_private *sp = (struct speedo_private *)dev->priv;
1946 long ioaddr = dev->base_addr;
1948 /* Update only if the previous dump finished. */
1949 if (sp->lstats->done_marker == le32_to_cpu(0xA007)) {
1950 sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
1951 sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
1952 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
1953 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
1954 /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
1955 sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
1956 sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
1957 sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
1958 sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
1959 sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
1960 sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
/* Re-arm: the chip sets the marker again when the next dump lands. */
1961 sp->lstats->done_marker = 0x0000;
1962 if (netif_running(dev)) {
1963 unsigned long flags;
1964 /* Take a spinlock to make wait_for_cmd_done and sending the
1965 command atomic. --SAW */
1966 spin_lock_irqsave(&sp->lock, flags);
1967 wait_for_cmd_done(dev);
1968 outb(CUDumpStats, ioaddr + SCBCmd);
1969 spin_unlock_irqrestore(&sp->lock, flags);
/*
 * netdev_ethtool_ioctl - handle the SIOCETHTOOL sub-commands.
 * @useraddr: user-space pointer whose first u32 selects the operation.
 *
 * Copies the command word in, dispatches on it, and copies results back
 * with copy_to_user().
 * NOTE(review): "ðcmd" below is an encoding-mangled "&ethcmd"; the
 * declaration of ethcmd, the switch header, braces and the -EFAULT
 * returns sit on lines elided from this excerpt of the original file.
 */
1975 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
1978 struct speedo_private *sp = dev->priv;
1980 if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd)))
1984 /* get driver-specific version/etc. info */
1985 case ETHTOOL_GDRVINFO: {
1986 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
/* NOTE(review): strncpy() does not guarantee NUL termination when the
   source fills the buffer, and "version" is a long multi-line string, so
   info.version is almost certainly truncated and possibly unterminated. */
1987 strncpy(info.driver, "eepro100", sizeof(info.driver)-1);
1988 strncpy(info.version, version, sizeof(info.version)-1);
1990 strcpy(info.bus_info, sp->pdev->slot_name);
1991 if (copy_to_user(useraddr, &info, sizeof(info)))
1997 case ETHTOOL_GSET: {
/* Read the current link settings from the MII layer under the lock. */
1998 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1999 spin_lock_irq(&sp->lock);
2000 mii_ethtool_gset(&sp->mii_if, &ecmd);
2001 spin_unlock_irq(&sp->lock);
2002 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
2007 case ETHTOOL_SSET: {
/* Apply new link settings supplied by userspace. */
2009 struct ethtool_cmd ecmd;
2010 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
2012 spin_lock_irq(&sp->lock);
2013 r = mii_ethtool_sset(&sp->mii_if, &ecmd);
2014 spin_unlock_irq(&sp->lock);
2017 /* restart autonegotiation */
2018 case ETHTOOL_NWAY_RST: {
2019 return mii_nway_restart(&sp->mii_if);
2021 /* get link status */
2022 case ETHTOOL_GLINK: {
2023 struct ethtool_value edata = {ETHTOOL_GLINK};
2024 edata.data = mii_link_ok(&sp->mii_if);
2025 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2029 /* get message-level */
2030 case ETHTOOL_GMSGLVL: {
2031 struct ethtool_value edata = {ETHTOOL_GMSGLVL};
2032 edata.data = sp->msg_enable;
2033 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2037 /* set message-level */
2038 case ETHTOOL_SMSGLVL: {
2039 struct ethtool_value edata;
2040 if (copy_from_user(&edata, useraddr, sizeof(edata)))
/* Adjust the driver's debug verbosity (consumed by netif_msg_* checks). */
2042 sp->msg_enable = edata.data;
/*
 * speedo_ioctl - device-private ioctl handler: MII PHY register access
 * plus the SIOCETHTOOL passthrough.
 *
 * For MII reads and writes the device is first forced to full power
 * (ACPI state D0) and the media timer is stopped with del_timer_sync(),
 * because the timer routine also touches the MDIO bus; both are restored
 * afterwards.  (saved_acpi and t are declared on lines elided from this
 * excerpt, as are the switch header and several returns.)
 */
2051 static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2053 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2054 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
/* Low 5 bits of the stored PHY id give the MII bus address in use. */
2055 int phy = sp->phy[0] & 0x1f;
2060 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2061 case SIOCDEVPRIVATE: /* for binary compat, remove in 2.5 */
2064 case SIOCGMIIREG: /* Read MII PHY register. */
2065 case SIOCDEVPRIVATE+1: /* for binary compat, remove in 2.5 */
2066 /* FIXME: these operations need to be serialized with MDIO
2067 access from the timeout handler.
2068 They are currently serialized only with MDIO access from the
2069 timer routine. 2000/05/09 SAW */
/* Wake the chip and quiesce the timer before touching the MDIO bus. */
2070 saved_acpi = pci_set_power_state(sp->pdev, 0);
2071 t = del_timer_sync(&sp->timer);
2072 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2074 add_timer(&sp->timer); /* may be set to the past --SAW */
2075 pci_set_power_state(sp->pdev, saved_acpi);
2078 case SIOCSMIIREG: /* Write MII PHY register. */
2079 case SIOCDEVPRIVATE+2: /* for binary compat, remove in 2.5 */
/* Writing PHY registers can reconfigure the link: admin-only. */
2080 if (!capable(CAP_NET_ADMIN))
2082 saved_acpi = pci_set_power_state(sp->pdev, 0);
2083 t = del_timer_sync(&sp->timer);
2084 mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
2086 add_timer(&sp->timer); /* may be set to the past --SAW */
2087 pci_set_power_state(sp->pdev, saved_acpi);
/* Presumably under case SIOCETHTOOL (the case label is on an elided
   line): hand off to the ethtool dispatcher above. */
2090 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
2096 /* Set or clear the multicast filter for this adaptor.
2097 This is very ugly with Intel chips -- we usually have to execute an
2098 entire configuration command, plus process a multicast command.
2099 This is complicated. We must put a large configuration command and
2100 an arbitrarily-sized multicast command in the transmit list.
2101 To minimize the disruption -- the previous command might have already
2102 loaded the link -- we convert the current command block, normally a Tx
2103 command, into a no-op and link it to the new command. */
/*
 * set_rx_mode - install the receive filter (promiscuous / all-multicast /
 * multicast address list) on the chip.
 *
 * The i8255x accepts filter changes only as commands on the Tx command
 * list.  Three paths below:
 *   1. mode bits changed        -> in-ring CmdConfigure frame;
 *   2. mode 0, fewer than 4
 *      multicast addresses      -> in-ring CmdMulticastList frame;
 *   3. mode 0, larger list      -> out-of-ring setup frame kmalloc'd,
 *                                  DMA-mapped, and spliced in by turning
 *                                  the current ring slot into a NoOp that
 *                                  links to it.
 * In each path the previous command's suspend bit is cleared and CUResume
 * issued so the command unit picks up the new work.
 */
2105 static void set_rx_mode(struct net_device *dev)
2107 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2108 long ioaddr = dev->base_addr;
2109 struct descriptor *last_cmd;
2111 unsigned long flags;
/* Derive new_rx_mode from the interface flags (assignments for the
   promiscuous/allmulti/normal values are on elided lines). */
2114 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2116 } else if ((dev->flags & IFF_ALLMULTI) ||
2117 dev->mc_count > multicast_filter_limit) {
2122 if (netif_msg_rx_status(sp))
2123 printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2124 sp->rx_mode, new_rx_mode);
2126 if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
2127 /* The Tx ring is full -- don't add anything! Hope the mode will be
2128 * set again later. */
/* --- Path 1: the mode bits changed, emit a full configuration frame. --- */
2133 if (new_rx_mode != sp->rx_mode) {
2134 u8 *config_cmd_data;
2136 spin_lock_irqsave(&sp->lock, flags);
2137 entry = sp->cur_tx++ % TX_RING_SIZE;
2138 last_cmd = sp->last_cmd;
2139 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2141 sp->tx_skbuff[entry] = 0; /* Redundant. */
2142 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2143 sp->tx_ring[entry].link =
2144 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2145 config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2146 /* Construct a full CmdConfig frame. */
2147 memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
/* Patch the template with the module-parameter FIFO/DMA settings and
   the mode bits: bit 1 of new_rx_mode -> accept-all-multicast. */
2148 config_cmd_data[1] = (txfifo << 4) | rxfifo;
2149 config_cmd_data[4] = rxdmacount;
2150 config_cmd_data[5] = txdmacount + 0x80;
2151 config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
2152 /* 0x80 doesn't disable FC 0x84 does.
2153 Disable Flow control since we are not ACK-ing any FC interrupts
2154 for now. --Dragan */
2155 config_cmd_data[19] = 0x84;
2156 config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
/* Bit 0 of new_rx_mode selects promiscuous receive (0x0D vs 0x05). */
2157 config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
2158 if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
2159 config_cmd_data[15] |= 0x80;
2160 config_cmd_data[8] = 0;
2162 /* Trigger the command unit resume. */
2163 wait_for_cmd_done(dev);
2164 clear_suspend(last_cmd);
2165 outb(CUResume, ioaddr + SCBCmd);
2166 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2167 netif_stop_queue(dev);
2170 spin_unlock_irqrestore(&sp->lock, flags);
/* --- Path 2: small multicast list fits inside one ring descriptor. --- */
2173 if (new_rx_mode == 0 && dev->mc_count < 4) {
2174 /* The simple case of 0-3 multicast list entries occurs often, and
2175 fits within one tx_ring[] entry. */
2176 struct dev_mc_list *mclist;
2177 u16 *setup_params, *eaddrs;
2179 spin_lock_irqsave(&sp->lock, flags);
2180 entry = sp->cur_tx++ % TX_RING_SIZE;
2181 last_cmd = sp->last_cmd;
2182 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2184 sp->tx_skbuff[entry] = 0;
2185 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2186 sp->tx_ring[entry].link =
2187 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2188 sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
/* The list byte-count word is followed by the 6-byte addresses,
   copied as three u16 words each. */
2189 setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
2190 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2191 /* Fill in the multicast addresses. */
2192 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2193 i++, mclist = mclist->next) {
2194 eaddrs = (u16 *)mclist->dmi_addr;
2195 *setup_params++ = *eaddrs++;
2196 *setup_params++ = *eaddrs++;
2197 *setup_params++ = *eaddrs++;
2200 wait_for_cmd_done(dev);
2201 clear_suspend(last_cmd);
2202 /* Immediately trigger the command unit resume. */
2203 outb(CUResume, ioaddr + SCBCmd);
2205 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2206 netif_stop_queue(dev);
2209 spin_unlock_irqrestore(&sp->lock, flags);
/* --- Path 3: large multicast list needs an out-of-ring setup frame. --- */
2210 } else if (new_rx_mode == 0) {
2211 struct dev_mc_list *mclist;
2212 u16 *setup_params, *eaddrs;
2213 struct speedo_mc_block *mc_blk;
2214 struct descriptor *mc_setup_frm;
2217 mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2219 if (mc_blk == NULL) {
2220 printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
/* Leave rx_mode poisoned so a later call retries the update. */
2222 sp->rx_mode = -1; /* We failed, try again. */
2225 mc_blk->next = NULL;
2226 mc_blk->len = 2 + multicast_filter_limit*6;
/* Map the frame for device DMA (the mc_blk->frame_dma assignment is on
   an elided line). */
2228 pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2230 mc_setup_frm = &mc_blk->frame;
2232 /* Fill the setup frame. */
2233 if (netif_msg_ifup(sp))
2234 printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2235 dev->name, mc_setup_frm);
/* CmdIntr: request an interrupt on completion so the block can be
   reclaimed once the chip has consumed it. */
2236 mc_setup_frm->cmd_status =
2237 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2238 /* Link set below. */
2239 setup_params = (u16 *)&mc_setup_frm->params;
2240 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2241 /* Fill in the multicast addresses. */
2242 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2243 i++, mclist = mclist->next) {
2244 eaddrs = (u16 *)mclist->dmi_addr;
2245 *setup_params++ = *eaddrs++;
2246 *setup_params++ = *eaddrs++;
2247 *setup_params++ = *eaddrs++;
2250 /* Disable interrupts while playing with the Tx Cmd list. */
2251 spin_lock_irqsave(&sp->lock, flags);
/* Queue the block on the pending mc-setup list for later reclaim. */
2253 if (sp->mc_setup_tail)
2254 sp->mc_setup_tail->next = mc_blk;
2256 sp->mc_setup_head = mc_blk;
2257 sp->mc_setup_tail = mc_blk;
2258 mc_blk->tx = sp->cur_tx;
2260 entry = sp->cur_tx++ % TX_RING_SIZE;
2261 last_cmd = sp->last_cmd;
2262 sp->last_cmd = mc_setup_frm;
2264 /* Change the command to a NoOp, pointing to the CmdMulti command. */
2265 sp->tx_skbuff[entry] = 0;
2266 sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2267 sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2269 /* Set the link in the setup frame. */
2270 mc_setup_frm->link =
2271 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2273 pci_dma_sync_single(sp->pdev, mc_blk->frame_dma,
2274 mc_blk->len, PCI_DMA_TODEVICE);
2276 wait_for_cmd_done(dev);
2277 clear_suspend(last_cmd);
2278 /* Immediately trigger the command unit resume. */
2279 outb(CUResume, ioaddr + SCBCmd);
2281 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2282 netif_stop_queue(dev);
2285 spin_unlock_irqrestore(&sp->lock, flags);
2287 if (netif_msg_rx_status(sp))
2288 printk(" CmdMCSetup frame length %d in entry %d.\n",
2289 dev->mc_count, entry);
/* Record the mode actually programmed into the chip. */
2292 sp->rx_mode = new_rx_mode;
/*
 * eepro100_suspend - PCI suspend hook.
 *
 * Saves PCI config space into sp->pm_state, and if the interface is up,
 * stops the media timer, detaches the net device so the stack stops
 * using it, and issues a partial reset through the SCB port to quiesce
 * the chip.  (The early "return 0" for a down interface and the closing
 * of the function are on lines elided from this excerpt.)
 */
2296 static int eepro100_suspend(struct pci_dev *pdev, u32 state)
2298 struct net_device *dev = pci_get_drvdata (pdev);
2299 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2300 long ioaddr = dev->base_addr;
2302 pci_save_state(pdev, sp->pm_state);
/* Nothing further to quiesce if the interface is down. */
2304 if (!netif_running(dev))
2307 del_timer_sync(&sp->timer);
2309 netif_device_detach(dev);
/* Stop the chip via the SCB port partial-reset command. */
2310 outl(PortPartialReset, ioaddr + SCBPort);
2312 /* XXX call pci_set_power_state ()? */
/*
 * eepro100_resume - PCI resume hook.
 *
 * Restores PCI config space, and if the interface was up, masks chip
 * interrupts at the SCB, re-attaches the net device, clears the cached
 * link-partner/flow-control state so autonegotiation results are
 * re-evaluated, and restarts the media timer 2 seconds out.
 */
2316 static int eepro100_resume(struct pci_dev *pdev)
2318 struct net_device *dev = pci_get_drvdata (pdev);
2319 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2320 long ioaddr = dev->base_addr;
2322 pci_restore_state(pdev, sp->pm_state);
2324 if (!netif_running(dev))
2327 /* I'm absolutely uncertain if this part of code may work.
   Things to verify:
2329 - correct hardware reinitialization;
2330 - correct driver behavior between different steps of the
     resume process;
2332 - serialization with other driver calls. */
2334 outw(SCBMaskAll, ioaddr + SCBCmd);
2336 netif_device_attach(dev);
/* Forget the previous link partner so the timer re-reads autoneg state. */
2338 sp->flow_ctrl = sp->partner = 0;
2340 sp->timer.expires = RUN_AT(2*HZ);
2341 add_timer(&sp->timer);
2344 #endif /* CONFIG_PM */
/*
 * eepro100_remove_one - PCI removal hook: undo probe in reverse order.
 *
 * Unregisters the netdev first so no new activity can start, then
 * releases the I/O and memory regions, unmaps the registers, frees the
 * consistent DMA block, and disables the PCI device.
 */
2346 static void __devexit eepro100_remove_one (struct pci_dev *pdev)
2348 struct net_device *dev = pci_get_drvdata (pdev);
2349 struct speedo_private *sp = (struct speedo_private *)dev->priv;
2351 unregister_netdev(dev);
/* BAR 1 is the I/O port range, BAR 0 the memory-mapped registers. */
2353 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
2354 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2357 iounmap((char *)dev->base_addr);
/* A single consistent allocation holds both the Tx ring and the
   speedo_stats area, as the size expression shows. */
2360 pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
2361 + sizeof(struct speedo_stats),
2362 sp->tx_ring, sp->tx_ring_dma);
2363 pci_disable_device(pdev);
/*
 * PCI device IDs this driver binds to: the named i82557 / 82559ER /
 * 82801BA (ICH) parts, plus a range of later Intel 10/100 device IDs
 * matched by bare hex value.  Any subsystem vendor/device is accepted
 * (PCI_ANY_ID).  (The terminating { 0, } sentinel and closing brace are
 * on lines elided from this excerpt.)
 */
2367 static struct pci_device_id eepro100_pci_tbl[] __devinitdata = {
2368 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82557,
2369 PCI_ANY_ID, PCI_ANY_ID, },
2370 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82559ER,
2371 PCI_ANY_ID, PCI_ANY_ID, },
2372 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_7,
2373 PCI_ANY_ID, PCI_ANY_ID, },
2374 { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2375 { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2376 { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2377 { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2378 { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2379 { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2380 { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2381 { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2382 { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2383 { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2384 { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2385 { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2386 { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2387 { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2388 { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2389 { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2390 { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
2391 { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2392 { PCI_VENDOR_ID_INTEL, 0x1228, PCI_ANY_ID, PCI_ANY_ID, },
2393 { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2394 { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2395 { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2396 { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2397 { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
/* Export the table so hotplug/module tools can match devices. */
2400 MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
/*
 * PCI driver glue, using old GNU-style "field:" designated initializers.
 * The suspend/resume hooks are conditional on CONFIG_PM; the matching
 * #ifdef (and the driver's name field) are on lines elided from this
 * excerpt.
 */
2402 static struct pci_driver eepro100_driver = {
2404 id_table: eepro100_pci_tbl,
2405 probe: eepro100_init_one,
2406 remove: __devexit_p(eepro100_remove_one),
2408 suspend: eepro100_suspend,
2409 resume: eepro100_resume,
2410 #endif /* CONFIG_PM */
2413 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,48)
/*
 * Compatibility shim for kernels older than 2.3.48, which lack
 * pci_module_init(): register the driver, and when no cards were bound
 * (the return-code test is on an elided line) unregister again and
 * report that the driver was not installed.
 */
2414 static int pci_module_init(struct pci_driver *pdev)
2418 rc = pci_register_driver(pdev);
2420 printk(KERN_INFO "%s: No cards found, driver not installed.\n",
2422 pci_unregister_driver(pdev);
/* Module entry point: hand the driver table to the PCI core. */
2429 static int __init eepro100_init_module(void)
2434 return pci_module_init(&eepro100_driver);
/* Module exit point: detach the driver from the PCI core. */
2437 static void __exit eepro100_cleanup_module(void)
2439 pci_unregister_driver(&eepro100_driver);
2442 module_init(eepro100_init_module);
2443 module_exit(eepro100_cleanup_module);
2447 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"