1 /* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */
3 Written 1996-1999 by Donald Becker.
5 This software may be used and distributed according to the terms
6 of the GNU General Public License, incorporated herein by reference.
8 This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
9 Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
10 and the EtherLink XL 3c900 and 3c905 cards.
12 Problem reports and questions should be directed to
15 The author may be reached as becker@scyld.com, or C/O
16 Scyld Computing Corporation
17 410 Severn Ave., Suite 210
20 Linux Kernel Additions:
22 0.99H+lk0.9 - David S. Miller - softnet, PCI DMA updates
23 0.99H+lk1.0 - Jeff Garzik <jgarzik@pobox.com>
24 Remove compatibility defines for kernel versions < 2.2.x.
25 Update for new 2.3.x module interface
26 LK1.1.2 (March 19, 2000)
27 * New PCI interface (jgarzik)
29 LK1.1.3 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
30 - Merged with 3c575_cb.c
31 - Don't set RxComplete in boomerang interrupt enable reg
32 - spinlock in vortex_timer to protect mdio functions
33 - disable local interrupts around call to vortex_interrupt in
34 vortex_tx_timeout() (So vortex_interrupt can use spin_lock())
35 - Select window 3 in vortex_timer()'s write to Wn3_MAC_Ctrl
36 - In vortex_start_xmit(), move the lock to _after_ we've altered
37 vp->cur_tx and vp->tx_full. This defeats the race between
38 vortex_start_xmit() and vortex_interrupt which was identified
40 - Merged back support for six new cards from various sources
41 - Set vortex_have_pci if pci_module_init returns zero (fixes cardbus
43 - Tell it that 3c905C has NWAY for 100bT autoneg
44 - Fix handling of SetStatusEnd in 'Too much work..' code, as
45 per 2.3.99's 3c575_cb (Dave Hinds).
46 - Split ISR into two for vortex & boomerang
47 - Fix MOD_INC/DEC races
48 - Handle resource allocation failures.
49 - Fix 3CCFE575CT LED polarity
50 - Make tx_interrupt_mitigation the default
52 LK1.1.4 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
53 - Add extra TxReset to vortex_up() to fix 575_cb hotplug initialisation probs.
54 - Put vortex_info_tbl into __devinitdata
55 - In the vortex_error StatsFull HACK, disable stats in vp->intr_enable as well
57 - Increased the loop counter in issue_and_wait from 2,000 to 4,000.
59 LK1.1.5 28 April 2000, andrewm
60 - Added powerpc defines (John Daniel <jdaniel@etresoft.com> said these work...)
61 - Some extra diagnostics
62 - In vortex_error(), reset the Tx on maxCollisions. Otherwise most
63 chips usually get a Tx timeout.
64 - Added extra_reset module parm
65 - Replaced some inline timer manip with mod_timer
66 (Francois Romieu <Francois.Romieu@nic.fr>)
67 - In vortex_up(), don't make Wn3_config initialisation dependent upon has_nway
68 (this came across from 3c575_cb).
70 LK1.1.6 06 Jun 2000, andrewm
71 - Backed out the PPC defines.
72 - Use del_timer_sync(), mod_timer().
73 - Fix wrapped ulong comparison in boomerang_rx()
74 - Add IS_TORNADO, use it to suppress 3c905C checksum error msg
75 (Donald Becker, I Lee Hetherington <ilh@sls.lcs.mit.edu>)
76 - Replace union wn3_config with BFINS/BFEXT manipulation for
77 sparc64 (Pete Zaitcev, Peter Jones)
78 - In vortex_error, do_tx_reset and vortex_tx_timeout(Vortex):
79 do a netif_wake_queue() to better recover from errors. (Anders Pedersen,
81 - Print a warning on out-of-memory (rate limited to 1 per 10 secs)
82 - Added two more Cardbus 575 NICs: 5b57 and 6564 (Paul Wagland)
84 LK1.1.7 2 Jul 2000 andrewm
85 - Better handling of shared IRQs
86 - Reset the transmitter on a Tx reclaim error
87 - Fixed crash under OOM during vortex_open() (Mark Hemment)
88 - Fix Rx cessation problem during OOM (help from Mark Hemment)
89 - The spinlocks around the mdio access were blocking interrupts for 300uS.
90 Fix all this to use spin_lock_bh() within mdio_read/write
91 - Only write to TxFreeThreshold if it's a boomerang - other NICs don't
93 - Added 802.3x MAC-layer flow control support
95 LK1.1.8 13 Aug 2000 andrewm
96 - Ignore request_region() return value - already reserved if Cardbus.
97 - Merged some additional Cardbus flags from Don's 0.99Qk
98 - Some fixes for 3c556 (Fred Maciel)
99 - Fix for EISA initialisation (Jan Rekorajski)
100 - Renamed MII_XCVR_PWR and EEPROM_230 to align with 3c575_cb and D. Becker's drivers
101 - Fixed MII_XCVR_PWR for 3CCFE575CT
102 - Added INVERT_LED_PWR, used it.
103 - Backed out the extra_reset stuff
105 LK1.1.9 12 Sep 2000 andrewm
106 - Backed out the tx_reset_resume flags. It was a no-op.
107 - In vortex_error, don't reset the Tx on txReclaim errors
108 - In vortex_error, don't reset the Tx on maxCollisions errors.
109 Hence backed out all the DownListPtr logic here.
110 - In vortex_error, give Tornado cards a partial TxReset on
111 maxCollisions (David Hinds). Defined MAX_COLLISION_RESET for this.
112 - Redid some driver flags and device names based on pcmcia_cs-3.1.20.
113 - Fixed a bug where, if vp->tx_full is set when the interface
114 is downed, it remains set when the interface is upped. Bad
117 LK1.1.10 17 Sep 2000 andrewm
118 - Added EEPROM_8BIT for 3c555 (Fred Maciel)
119 - Added experimental support for the 3c556B Laptop Hurricane (Louis Gerbarg)
120 - Add HAS_NWAY to "3c900 Cyclone 10Mbps TPO"
122 LK1.1.11 13 Nov 2000 andrewm
123 - Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER
125 LK1.1.12 1 Jan 2001 andrewm (2.4.0-pre1)
126 - Call pci_enable_device before we request our IRQ (Tobias Ringstrom)
127 - Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra)
128 - Added extended issue_and_wait for the 3c905CX.
129 - Look for an MII on PHY index 24 first (3c905CX oddity).
130 - Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger)
131 - Don't free skbs we don't own on oom path in vortex_open().
134 - Added explicit `medialock' flag so we can truly
135 lock the media type down with `options'.
136 - "check ioremap return and some tidbits" (Arnaldo Carvalho de Melo <acme@conectiva.com.br>)
137 - Added and used EEPROM_NORESET for 3c556B PM resumes.
138 - Fixed leakage of vp->rx_ring.
139 - Break out separate HAS_HWCKSM device capability flag.
140 - Kill vp->tx_full (ANK)
141 - Merge zerocopy fragment handling (ANK?)
144 - Enable WOL. Can be turned on with `enable_wol' module option.
145 - EISA and PCI initialisation fixes (jgarzik, Manfred Spraul)
146 - If a device's internalconfig register reports it has NWAY,
147 use it, even if autoselect is enabled.
149 LK1.1.15 6 June 2001 akpm
150 - Prevent double counting of received bytes (Lars Christensen)
151 - Add ethtool support (jgarzik)
152 - Add module parm descriptions (Andrzej M. Krzysztofowicz)
153 - Implemented alloc_etherdev() API
154 - Special-case the 'Tx error 82' message.
156 LK1.1.16 18 July 2001 akpm
157 - Make NETIF_F_SG dependent upon nr_free_highpages(), not on CONFIG_HIGHMEM
158 - Lessen verbosity of bootup messages
159 - Fix WOL - use new PM API functions.
160 - Use netif_running() instead of vp->open in suspend/resume.
161 - Don't reset the interface logic on open/close/rmmod. It upsets
162 autonegotiation, and hence DHCP (from 0.99T).
163 - Back out EEPROM_NORESET flag because of the above (we do it for all
165 - Correct 3c982 identification string
166 - Rename wait_for_completion() to issue_and_wait() to avoid completion.h
169 - See http://www.uow.edu.au/~andrewm/linux/#3c59x-2.3 for more details.
170 - Also see Documentation/networking/vortex.txt
174 * FIXME: This driver _could_ support MTU changing, but doesn't. See Don's hamachi.c implementation
175 * as well as other drivers
177 * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k
178 * due to dead code elimination. There will be some performance benefits from this due to
179 * elimination of all the tests and reduced cache footprint.
183 #define DRV_NAME "3c59x"
184 #define DRV_VERSION "LK1.1.16"
185 #define DRV_RELDATE "19 July 2001"
189 /* A few values that may be tweaked. */
190 /* Keep the ring sizes a power of two for efficiency. */
191 #define TX_RING_SIZE 16
192 #define RX_RING_SIZE 32
193 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
195 /* "Knobs" that adjust features and parameters. */
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1512 effectively disables this feature. */
#ifndef __arm__
static const int rx_copybreak = 200;
#else
/* ARM systems perform better by disregarding the bus-master
   transfer capability of these cards. -- rmk */
static const int rx_copybreak = 1513;
#endif
205 /* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
206 static const int mtu = 1500;
207 /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
208 static int max_interrupt_work = 32;
209 /* Tx timeout interval (millisecs) */
210 static int watchdog = 5000;
212 /* Allow aggregation of Tx interrupts. Saves CPU load at the cost
213 * of possible Tx stalls if the system is blocking interrupts
214 * somewhere else. Undefine this to disable.
216 #define tx_interrupt_mitigation 1
/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
/* The macro aliases the variable name so the module parameter is `debug'. */
#define vortex_debug debug
#ifdef VORTEX_DEBUG
static int vortex_debug = VORTEX_DEBUG;
#else
static int vortex_debug = 1;
#endif
227 #error You must compile this file with the correct options!
228 #error See the last lines of the source file.
229 #error You must compile this driver with "-O".
232 #include <linux/config.h>
233 #include <linux/module.h>
234 #include <linux/kernel.h>
235 #include <linux/sched.h>
236 #include <linux/string.h>
237 #include <linux/timer.h>
238 #include <linux/errno.h>
239 #include <linux/in.h>
240 #include <linux/ioport.h>
241 #include <linux/slab.h>
242 #include <linux/interrupt.h>
243 #include <linux/pci.h>
244 #include <linux/mii.h>
245 #include <linux/init.h>
246 #include <linux/netdevice.h>
247 #include <linux/etherdevice.h>
248 #include <linux/skbuff.h>
249 #include <linux/ethtool.h>
250 #include <linux/highmem.h>
251 #include <asm/irq.h> /* For NR_IRQS only. */
252 #include <asm/bitops.h>
254 #include <asm/uaccess.h>
256 /* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
257 This is only in the support-all-kernels source code. */
259 #define RUN_AT(x) (jiffies + (x))
261 #include <linux/delay.h>
264 static char version[] __devinitdata =
265 DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n";
267 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
268 MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver "
269 DRV_VERSION " " DRV_RELDATE);
270 MODULE_LICENSE("GPL");
272 MODULE_PARM(debug, "i");
273 MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
274 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
275 MODULE_PARM(hw_checksums, "1-" __MODULE_STRING(8) "i");
276 MODULE_PARM(flow_ctrl, "1-" __MODULE_STRING(8) "i");
277 MODULE_PARM(enable_wol, "1-" __MODULE_STRING(8) "i");
278 MODULE_PARM(rx_copybreak, "i");
279 MODULE_PARM(max_interrupt_work, "i");
280 MODULE_PARM(compaq_ioaddr, "i");
281 MODULE_PARM(compaq_irq, "i");
282 MODULE_PARM(compaq_device_id, "i");
283 MODULE_PARM(watchdog, "i");
284 MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
285 MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
286 MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
287 MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
288 MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
289 MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
290 MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
291 MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
292 MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
293 MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
294 MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
295 MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
297 /* Operational parameters that are not usually changed. */
299 /* The Vortex size is twice that of the original EtherLinkIII series: the
300 runtime register window, window 1, is now always mapped in.
301 The Boomerang size is twice as large as the Vortex -- it has additional
302 bus master control registers. */
303 #define VORTEX_TOTAL_SIZE 0x20
304 #define BOOMERANG_TOTAL_SIZE 0x40
306 /* Set iff a MII transceiver on any interface requires mdio preamble.
307 This is only set with the original DP83840 on older 3c905 boards, so the extra
308 code size of a per-interface flag is not worthwhile. */
309 static char mii_preamble_required;
311 #define PFX DRV_NAME ": "
318 I. Board Compatibility
320 This device driver is designed for the 3Com FastEtherLink and FastEtherLink
321 XL, 3Com's PCI to 10/100baseT adapters. It also works with the 10Mbs
322 versions of the FastEtherLink cards. The supported product IDs are
323 3c590, 3c592, 3c595, 3c597, 3c900, 3c905
325 The related ISA 3c515 is supported with a separate driver, 3c515.c, included
326 with the kernel source or available from
327 cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html
329 II. Board-specific settings
331 PCI bus devices are configured by the system at boot time, so no jumpers
332 need to be set on the board. The system BIOS should be set to assign the
333 PCI INTA signal to an otherwise unused system IRQ line.
335 The EEPROM settings for media type and forced-full-duplex are observed.
336 The EEPROM media type should be left at the default "autoselect" unless using
337 10base2 or AUI connections which cannot be reliably detected.
339 III. Driver operation
341 The 3c59x series use an interface that's very similar to the previous 3c5x9
342 series. The primary interface is two programmed-I/O FIFOs, with an
343 alternate single-contiguous-region bus-master transfer (see next).
345 The 3c900 "Boomerang" series uses a full-bus-master interface with separate
346 lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
347 DEC Tulip and Intel Speedo3. The first chip version retains a compatible
348 programmed-I/O interface that has been removed in 'B' and subsequent board
351 One extension that is advertised in a very large font is that the adapters
352 are capable of being bus masters. On the Vortex chip this capability was
353 only for a single contiguous region making it far less useful than the full
354 bus master capability. There is a significant performance impact of taking
355 an extra interrupt or polling for the completion of each transfer, as well
356 as difficulty sharing the single transfer engine between the transmit and
357 receive threads. Using DMA transfers is a win only with large blocks or
358 with the flawed versions of the Intel Orion motherboard PCI controller.
360 The Boomerang chip's full-bus-master interface is useful, and has the
361 currently-unused advantages over other similar chips that queued transmit
362 packets may be reordered and receive buffer groups are associated with a
365 With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme.
366 Rather than a fixed intermediate receive buffer, this scheme allocates
367 full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as
368 the copying breakpoint: it is chosen to trade-off the memory wasted by
369 passing the full-sized skbuff to the queue layer for all frames vs. the
370 copying cost of copying a frame to a correctly-sized skbuff.
372 IIIC. Synchronization
373 The driver runs as two independent, single-threaded flows of control. One
374 is the send-packet routine, which enforces single-threaded use by the
375 dev->tbusy flag. The other thread is the interrupt handler, which is single
376 threaded by the hardware and other software.
380 Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
381 3c590, 3c595, and 3c900 boards.
382 The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
383 the EISA version is called "Demon". According to Terry these names come
384 from rides at the local amusement park.
386 The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
387 This driver only supports ethernet packets because of the skbuff allocation
391 /* This table drives the PCI probe routines. It's mostly boilerplate in all
392 of the drivers, and will likely be provided by some future kernel.
395 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
396 PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
/* Per-board capability/quirk flags, used in the vortex_info_tbl entries
 * below.  The low four bits identify the chip generation; the remaining
 * bits mark optional hardware features and required workarounds. */
enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
	EEPROM_8BIT=0x10,	/* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */
	HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100,
	INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800,
	EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000 };
448 /* note: this array directly indexed by above enums, and MUST
449 * be kept in sync with both the enums above, and the PCI device
452 static struct vortex_chip_info {
457 } vortex_info_tbl[] __devinitdata = {
458 #define EISA_TBL_OFFSET 0 /* Offset of this entry for vortex_eisa_init */
459 {"3c590 Vortex 10Mbps",
460 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
461 {"3c592 EISA 10Mbps Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */
462 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
463 {"3c597 EISA Fast Demon/Vortex", /* AKPM: from Don's 3c59x_cb.c 0.49H */
464 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
465 {"3c595 Vortex 100baseTx",
466 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
467 {"3c595 Vortex 100baseT4",
468 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
470 {"3c595 Vortex 100base-MII",
471 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
472 {"3c900 Boomerang 10baseT",
473 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, },
474 {"3c900 Boomerang 10Mbps Combo",
475 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, },
476 {"3c900 Cyclone 10Mbps TPO", /* AKPM: from Don's 0.99M */
477 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
478 {"3c900 Cyclone 10Mbps Combo",
479 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
481 {"3c900 Cyclone 10Mbps TPC", /* AKPM: from Don's 0.99M */
482 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
483 {"3c900B-FL Cyclone 10base-FL",
484 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
485 {"3c905 Boomerang 100baseTx",
486 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, },
487 {"3c905 Boomerang 100baseT4",
488 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, },
489 {"3c905B Cyclone 100baseTx",
490 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
492 {"3c905B Cyclone 10/100/BNC",
493 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
494 {"3c905B-FX Cyclone 100baseFx",
495 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
497 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
499 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
500 {"3c982 Dual Port Server Cyclone",
501 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
503 {"3cSOHO100-TX Hurricane",
504 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
505 {"3c555 Laptop Hurricane",
506 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
507 {"3c556 Laptop Tornado",
508 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
510 {"3c556B Laptop Hurricane",
511 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
513 {"3c575 [Megahertz] 10/100 LAN CardBus",
514 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
516 {"3c575 Boomerang CardBus",
517 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
518 {"3CCFE575BT Cyclone CardBus",
519 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
520 INVERT_LED_PWR|HAS_HWCKSM, 128, },
521 {"3CCFE575CT Tornado CardBus",
522 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
523 MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
524 {"3CCFE656 Cyclone CardBus",
525 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
526 INVERT_LED_PWR|HAS_HWCKSM, 128, },
527 {"3CCFEM656B Cyclone+Winmodem CardBus",
528 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
529 INVERT_LED_PWR|HAS_HWCKSM, 128, },
531 {"3CXFEM656C Tornado+Winmodem CardBus", /* From pcmcia-cs-3.1.5 */
532 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
533 MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
534 {"3c450 HomePNA Tornado", /* AKPM: from Don's 0.99Q */
535 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
537 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
538 {0,}, /* 0 terminated list. */
542 static struct pci_device_id vortex_pci_tbl[] __devinitdata = {
543 { 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
544 { 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
545 { 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
546 { 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 },
547 { 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 },
549 { 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 },
550 { 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 },
551 { 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 },
552 { 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 },
553 { 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 },
555 { 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 },
556 { 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
557 { 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
558 { 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
559 { 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
561 { 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
562 { 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX },
563 { 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C },
564 { 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 },
565 { 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 },
567 { 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX },
568 { 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 },
569 { 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 },
570 { 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B },
571 { 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 },
573 { 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 },
574 { 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 },
575 { 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT },
576 { 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 },
577 { 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 },
579 { 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 },
580 { 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 },
581 { 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 },
582 {0,} /* 0 terminated list. */
584 MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
587 /* Operational definitions.
588 These are not used by other compilation units and thus are not
589 exported in a ".h" file.
591 First the windows. There are eight register windows, with the command
592 and status registers available in each.
594 #define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
596 #define EL3_STATUS 0x0e
598 /* The top five bits written to EL3_CMD are a command, the lower
599 11 bits are the parameter, if applicable.
600 Note that 11 parameter bits was fine for ethernet, but the new chip
601 can handle FDDI length frames (~4500 octets) and now parameters count
602 32-bit 'Dwords' rather than octets. */
605 TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
606 RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
607 UpStall = 6<<11, UpUnstall = (6<<11)+1,
608 DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
609 RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
610 FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
611 SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
612 SetTxThreshold = 18<<11, SetTxStart = 19<<11,
613 StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
614 StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
616 /* The SetRxFilter command accepts the following classes: */
618 RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
620 /* Bits in the general status register. */
622 IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
623 TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
624 IntReq = 0x0040, StatsFull = 0x0080,
625 DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
626 DMAInProgress = 1<<11, /* DMA controller is still busy.*/
627 CmdInProgress = 1<<12, /* EL3_CMD is still busy.*/
630 /* Register window 1 offsets, the window used in normal operation.
631 On the Vortex this window is always mapped at offsets 0x10-0x1f. */
633 TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
634 RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
635 TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
638 Wn0EepromCmd = 10, /* Window 0: EEPROM command register. */
639 Wn0EepromData = 12, /* Window 0: EEPROM results register. */
640 IntrStatus=0x0E, /* Valid in all windows. */
642 enum Win0_EEPROM_bits {
643 EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
644 EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */
645 EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */
647 /* EEPROM locations. */
649 PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
650 EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
651 NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
652 DriverTune=13, Checksum=15};
654 enum Window2 { /* Window 2. */
657 enum Window3 { /* Window 3: MAC/config bits. */
658 Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8,
/* Extract a bitfield: shift `value' right by `offset' and mask to
 * `bitcount' bits. */
#define BFEXT(value, offset, bitcount)	\
	((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))

/* Insert a bitfield: replace `bitcount' bits of `lhs' at `offset' with
 * the low bits of `rhs'; other bits of `lhs' are preserved. */
#define BFINS(lhs, rhs, offset, bitcount)					\
	(((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) |	\
	(((rhs) & ((1 << (bitcount)) - 1)) << (offset)))

/* Field accessors for the 32-bit Wn3_Config value (these replaced the
 * old `union wn3_config' bitfields — see the LK1.1.6 changelog above). */
#define RAM_SIZE(v)		BFEXT(v, 0, 3)
#define RAM_WIDTH(v)	BFEXT(v, 3, 1)
#define RAM_SPEED(v)	BFEXT(v, 4, 2)
#define ROM_SIZE(v)		BFEXT(v, 6, 2)
#define RAM_SPLIT(v)	BFEXT(v, 16, 2)
#define XCVR(v)			BFEXT(v, 20, 4)
#define AUTOSELECT(v)	BFEXT(v, 24, 1)
676 enum Window4 { /* Window 4: Xcvr/media bits. */
677 Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
679 enum Win4_Media_bits {
680 Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */
681 Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */
682 Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */
683 Media_LnkBeat = 0x0800,
685 enum Window7 { /* Window 7: Bus Master control. */
686 Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12,
688 /* Boomerang bus master control registers. */
690 PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
691 TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
694 /* The Rx and Tx descriptor lists.
695 Caution Alpha hackers: these types are 32 bits! Note also the 8 byte
696 alignment constraint on tx_ring[] and rx_ring[]. */
697 #define LAST_FRAG 0x80000000 /* Last Addr/Len pair in descriptor. */
698 #define DN_COMPLETE 0x00010000 /* This packet has been downloaded */
699 struct boom_rx_desc {
700 u32 next; /* Last entry points to 0. */
702 u32 addr; /* Up to 63 addr/len pairs possible. */
703 s32 length; /* Set LAST_FRAG to indicate last pair. */
705 /* Values for the Rx status entry. */
706 enum rx_desc_status {
707 RxDComplete=0x00008000, RxDError=0x4000,
708 /* See boomerang_rx() for actual error bits */
709 IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
710 IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
714 #define DO_ZEROCOPY 1
716 #define DO_ZEROCOPY 0
719 struct boom_tx_desc {
720 u32 next; /* Last entry points to 0. */
721 s32 status; /* bits 0:12 length, others see below. */
726 } frag[1+MAX_SKB_FRAGS];
733 /* Values for the Tx status entry. */
734 enum tx_desc_status {
735 CRCDisable=0x2000, TxDComplete=0x8000,
736 AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
737 TxIntrUploaded=0x80000000, /* IRQ when in FIFO, but maybe not sent. */
740 /* Chip features we care about in vp->capabilities, read from the EEPROM. */
741 enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };
743 struct vortex_private {
744 /* The Rx and Tx rings should be quad-word-aligned. */
745 struct boom_rx_desc* rx_ring;
746 struct boom_tx_desc* tx_ring;
747 dma_addr_t rx_ring_dma;
748 dma_addr_t tx_ring_dma;
749 /* The addresses of transmit- and receive-in-place skbuffs. */
750 struct sk_buff* rx_skbuff[RX_RING_SIZE];
751 struct sk_buff* tx_skbuff[TX_RING_SIZE];
752 struct net_device *next_module; /* NULL if PCI device */
753 unsigned int cur_rx, cur_tx; /* The next free ring entry */
754 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
755 struct net_device_stats stats;
756 struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
757 dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
759 /* PCI configuration space information. */
760 struct pci_dev *pdev;
761 char *cb_fn_base; /* CardBus function status addr space. */
763 /* Some values here only for performance evaluation and path-coverage */
764 int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
767 /* The remainder are related to chip state, mostly media selection. */
768 struct timer_list timer; /* Media selection timer. */
769 struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */
770 int options; /* User-settable misc. driver options. */
771 unsigned int media_override:4, /* Passed-in media type. */
772 default_media:4, /* Read from the EEPROM/Wn3_Config. */
773 full_duplex:1, force_fd:1, autoselect:1,
774 bus_master:1, /* Vortex can only do a fragment bus-m. */
775 full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */
776 flow_ctrl:1, /* Use 802.3x flow control (PAUSE only) */
777 partner_flow_ctrl:1, /* Partner supports flow control */
779 enable_wol:1, /* Wake-on-LAN is enabled */
780 pm_state_valid:1, /* power_state[] has sane contents */
783 must_free_region:1; /* Flag: if zero, Cardbus owns the I/O region */
787 u16 available_media; /* From Wn3_Options. */
788 u16 capabilities, info1, info2; /* Various, from EEPROM. */
789 u16 advertising; /* NWay media advertisement */
790 unsigned char phys[2]; /* MII device addresses. */
791 u16 deferred; /* Resend these interrupts when we
792 * bale from the ISR */
793 u16 io_size; /* Size of PCI region (for release_region) */
794 spinlock_t lock; /* Serialise access to device & its vortex_private */
795 spinlock_t mdio_lock; /* Serialise access to mdio hardware */
799 /* The action to take with a media selection timer tick.
800 Note that we deviate from the 3Com order by checking 10base2 before AUI.
/* NOTE(review): the enum fragment below lists the transceiver (XCVR_*) codes
 * used to index media_tbl[]; the enum's opening/closing lines are not visible
 * in this extract -- confirm against the full source. */
803 XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
804 XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
/* Per-media-type selection table, indexed by XCVR_* code.  Each entry gives
 * the Wn4_Media bits to program, the Wn3_Config presence mask, the next
 * media type to try on failure, and how long to wait before checking link. */
807 static struct media_table {
809 unsigned int media_bits:16, /* Bits to set in Wn4_Media register. */
810 mask:8, /* The transceiver-present bit in Wn3_Config.*/
811 next:8; /* The media type to try next. */
812 int wait; /* Time before we check media status. */
814 { "10baseT", Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
815 { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
816 { "undefined", 0, 0x80, XCVR_10baseT, 10000},
817 { "10base2", 0, 0x10, XCVR_AUI, (1*HZ)/10},
818 { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
819 { "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14*HZ)/10},
820 { "MII", 0, 0x41, XCVR_10baseT, 3*HZ },
821 { "undefined", 0, 0x01, XCVR_10baseT, 10000},
822 { "Autonegotiate", 0, 0x41, XCVR_10baseT, 3*HZ},
823 { "MII-External", 0, 0x41, XCVR_10baseT, 3*HZ },
824 { "Default", 0, 0xFF, XCVR_10baseT, 10000},
/* Forward declarations for all driver-internal entry points (probe, ring
 * setup, MDIO access, timers, xmit/rx paths for both the Vortex PIO and
 * Boomerang bus-master variants, and net_device callbacks). */
827 static int vortex_probe1(struct pci_dev *pdev, long ioaddr, int irq,
828 int chip_idx, int card_idx);
829 static void vortex_up(struct net_device *dev);
830 static void vortex_down(struct net_device *dev);
831 static int vortex_open(struct net_device *dev);
832 static void mdio_sync(long ioaddr, int bits);
833 static int mdio_read(struct net_device *dev, int phy_id, int location);
834 static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
835 static void vortex_timer(unsigned long arg);
836 static void rx_oom_timer(unsigned long arg);
837 static int vortex_start_xmit(struct sk_buff *skb, struct net_device *dev);
838 static int boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev);
839 static int vortex_rx(struct net_device *dev);
840 static int boomerang_rx(struct net_device *dev);
841 static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs);
842 static void boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs);
843 static int vortex_close(struct net_device *dev);
844 static void dump_tx_ring(struct net_device *dev);
845 static void update_stats(long ioaddr, struct net_device *dev);
846 static struct net_device_stats *vortex_get_stats(struct net_device *dev);
847 static void set_rx_mode(struct net_device *dev);
848 static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
849 static void vortex_tx_timeout(struct net_device *dev);
850 static void acpi_set_WOL(struct net_device *dev);
852 /* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
853 /* Option count limit only -- unlimited interfaces are supported. */
/* Per-card module parameters; -1 means "not set, use hardware/EEPROM default". */
855 static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1,};
856 static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
857 static int hw_checksums[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
858 static int flow_ctrl[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
859 static int enable_wol[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
861 /* #define dev_alloc_skb dev_alloc_skb_debug */
863 /* A list of all installed Vortex EISA devices, for removing the driver module. */
864 static struct net_device *root_vortex_eisa_dev;
866 /* Variables to work-around the Compaq PCI BIOS32 problem. */
867 static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
/* Running count of probed cards; indexes the per-card option arrays above. */
869 static int vortex_cards_found;
/* PCI power-management suspend hook: if the interface is up, detach it from
 * the network stack so no further I/O is attempted while suspended.
 * NOTE(review): the function tail is missing from this extract (presumably
 * vortex_down() and a return follow) -- confirm against the full source. */
873 static int vortex_suspend (struct pci_dev *pdev, u32 state)
875 struct net_device *dev = pci_get_drvdata(pdev);
877 if (dev && dev->priv) {
878 if (netif_running(dev)) {
879 netif_device_detach(dev);
/* PCI power-management resume hook: if the interface was up, re-attach it to
 * the network stack.  NOTE(review): interior lines are missing from this
 * extract (presumably vortex_up() is called before the attach) -- confirm
 * against the full source. */
886 static int vortex_resume (struct pci_dev *pdev)
888 struct net_device *dev = pci_get_drvdata(pdev);
890 if (dev && dev->priv) {
891 if (netif_running(dev)) {
893 netif_device_attach(dev);
899 #endif /* CONFIG_PM */
901 /* returns count found (>= 0), or negative on error */
/* Probe every EISA slot (0x1000..0x8000, step 0x1000) for a 3Com 3c59{2,7}
 * board, then handle the Compaq BIOS32 special case.  Each accepted slot is
 * handed to vortex_probe1(); the I/O region is released again on any
 * rejection.  NOTE(review): several interior lines (loop continues, closing
 * braces) are missing from this extract. */
902 static int __init vortex_eisa_init (void)
906 int orig_cards_found = vortex_cards_found;
908 /* Now check all slots of the EISA bus. */
912 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
915 if (request_region(ioaddr, VORTEX_TOTAL_SIZE, DRV_NAME) == NULL)
918 /* Check the standard EISA ID register for an encoded '3Com'. */
919 if (inw(ioaddr + 0xC80) != 0x6d50) {
920 release_region (ioaddr, VORTEX_TOTAL_SIZE);
924 /* Check for a product that we support, 3c59{2,7} any rev. */
925 device_id = (inb(ioaddr + 0xC82)<<8) + inb(ioaddr + 0xC83);
926 if ((device_id & 0xFF00) != 0x5900) {
927 release_region (ioaddr, VORTEX_TOTAL_SIZE);
/* IRQ is encoded in the top 4 bits of the EISA register at 0xC88. */
931 rc = vortex_probe1(NULL, ioaddr, inw(ioaddr + 0xC88) >> 12,
932 EISA_TBL_OFFSET, vortex_cards_found);
934 vortex_cards_found++;
936 release_region (ioaddr, VORTEX_TOTAL_SIZE);
939 /* Special code to work-around the Compaq PCI BIOS32 problem. */
941 vortex_probe1(NULL, compaq_ioaddr, compaq_irq,
942 compaq_device_id, vortex_cards_found++);
945 return vortex_cards_found - orig_cards_found;
948 /* returns count (>= 0), or negative on error */
/* PCI probe callback: enable the device, then delegate all real setup to
 * vortex_probe1().  NOTE(review): error-return lines are missing from this
 * extract -- confirm against the full source. */
949 static int __devinit vortex_init_one (struct pci_dev *pdev,
950 const struct pci_device_id *ent)
954 /* wake up and enable device */
955 if (pci_enable_device (pdev)) {
958 rc = vortex_probe1 (pdev, pci_resource_start (pdev, 0), pdev->irq,
959 ent->driver_data, vortex_cards_found);
961 vortex_cards_found++;
967 * Start up the PCI device which is described by *pdev.
968 * Return 0 on success.
970 * NOTE: pdev can be NULL, for the case of an EISA driver
/* Core one-time probe: allocates the net_device, claims I/O resources,
 * reads the EEPROM (station address, capabilities), detects media/MII
 * transceivers, and wires up the net_device callbacks.
 * NOTE(review): this extract is missing many interior lines (declarations,
 * braces, error paths) -- comments below describe only what is visible. */
972 static int __devinit vortex_probe1(struct pci_dev *pdev,
973 long ioaddr, int irq,
974 int chip_idx, int card_idx)
976 struct vortex_private *vp;
978 unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
980 struct net_device *dev;
981 static int printed_version;
983 struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
986 if (!printed_version) {
991 print_name = pdev ? pdev->slot_name : "3c59x";
993 dev = alloc_etherdev(sizeof(*vp));
996 printk (KERN_ERR PFX "unable to allocate etherdev, aborting\n");
999 SET_MODULE_OWNER(dev);
1002 /* The lower four bits are the media type. */
1003 if (dev->mem_start) {
1005 * The 'options' param is passed in as the third arg to the
1006 * LILO 'ether=' argument for non-modular use
1008 option = dev->mem_start;
1010 else if (card_idx < MAX_UNITS)
1011 option = options[card_idx];
/* Option bit decoding: 0x8000/0x4000/0x0400 select driver features
 * (the acted-upon lines are missing from this extract -- confirm). */
1016 if (option & 0x8000)
1018 if (option & 0x4000)
1020 if (option & 0x0400)
1024 printk (KERN_INFO "See Documentation/networking/vortex.txt\n");
1025 printk(KERN_INFO "%s: 3Com %s %s at 0x%lx. Vers " DRV_VERSION "\n",
1027 pdev ? "PCI" : "EISA",
1031 dev->base_addr = ioaddr;
1034 vp->drv_flags = vci->drv_flags;
1035 vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
1036 vp->io_size = vci->io_size;
1037 vp->card_idx = card_idx;
1039 /* module list only for EISA devices */
1041 vp->next_module = root_vortex_eisa_dev;
1042 root_vortex_eisa_dev = dev;
1045 /* PCI-only startup logic */
1047 /* EISA resources already marked, so only PCI needs to do this here */
1048 /* Ignore return value, because Cardbus drivers already allocate for us */
1049 if (request_region(ioaddr, vci->io_size, print_name) != NULL)
1050 vp->must_free_region = 1;
1052 /* enable bus-mastering if necessary */
1053 if (vci->flags & PCI_USES_MASTER)
1054 pci_set_master (pdev);
1056 if (vci->drv_flags & IS_VORTEX) {
1058 u8 new_latency = 248;
1060 /* Check the PCI latency value. On the 3c590 series the latency timer
1061 must be set to the maximum value to avoid data corruption that occurs
1062 when the timer expires during a transfer. This bug exists the Vortex
1064 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
1065 if (pci_latency < new_latency) {
1066 printk(KERN_INFO "%s: Overriding PCI latency"
1067 " timer (CFLT) setting of %d, new value is %d.\n",
1068 print_name, pci_latency, new_latency);
1069 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
1074 spin_lock_init(&vp->lock);
1075 spin_lock_init(&vp->mdio_lock);
1078 /* Makes sure rings are at least 16 byte aligned. */
1079 vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
1080 + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1083 if (vp->rx_ring == 0)
/* Tx ring lives in the same DMA allocation, right after the Rx ring. */
1086 vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
1087 vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
1089 /* if we are a PCI driver, we store info in pdev->driver_data
1090 * instead of a module list */
1092 pci_set_drvdata(pdev, dev);
/* media_override == 7 means "no override"; option bits 0-3 carry the type. */
1094 vp->media_override = 7;
1096 vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
1097 if (vp->media_override != 7)
1099 vp->full_duplex = (option & 0x200) ? 1 : 0;
1100 vp->bus_master = (option & 16) ? 1 : 0;
/* Per-card module parameters override the 'options' word. */
1103 if (card_idx < MAX_UNITS) {
1104 if (full_duplex[card_idx] > 0)
1105 vp->full_duplex = 1;
1106 if (flow_ctrl[card_idx] > 0)
1108 if (enable_wol[card_idx] > 0)
1112 vp->force_fd = vp->full_duplex;
1113 vp->options = option;
1114 /* Read the station address from the EEPROM. */
1119 if (vci->drv_flags & EEPROM_8BIT)
1121 else if (vci->drv_flags & EEPROM_OFFSET)
1122 base = EEPROM_Read + 0x30;
1126 for (i = 0; i < 0x40; i++) {
1128 outw(base + i, ioaddr + Wn0EepromCmd);
1129 /* Pause for at least 162 us. for the read to take place. */
1130 for (timer = 10; timer >= 0; timer--) {
1132 if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
1135 eeprom[i] = inw(ioaddr + Wn0EepromData);
/* Verify the EEPROM checksum (two layouts; Tornado parts are exempt). */
1138 for (i = 0; i < 0x18; i++)
1139 checksum ^= eeprom[i];
1140 checksum = (checksum ^ (checksum >> 8)) & 0xff;
1141 if (checksum != 0x00) { /* Grrr, needless incompatible change 3Com. */
1143 checksum ^= eeprom[i++];
1144 checksum = (checksum ^ (checksum >> 8)) & 0xff;
1146 if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
1147 printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
/* Station address is stored big-endian at EEPROM words 10-12. */
1148 for (i = 0; i < 3; i++)
1149 ((u16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
1150 for (i = 0; i < 6; i++)
1151 printk("%c%2.2x", i ? ':' : ' ', dev->dev_addr[i]);
1153 for (i = 0; i < 6; i++)
1154 outb(dev->dev_addr[i], ioaddr + i);
1157 printk(", IRQ %s\n", __irq_itoa(dev->irq));
1159 printk(", IRQ %d\n", dev->irq);
1160 /* Tell them about an invalid IRQ. */
1161 if (dev->irq <= 0 || dev->irq >= NR_IRQS)
1162 printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! ***\n",
1167 step = (inb(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
1168 printk(KERN_INFO " product code %02x%02x rev %02x.%d date %02d-"
1169 "%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
1170 step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
/* CardBus variants: map BAR 2 (function status space) for later use. */
1172 if (pdev && vci->drv_flags & HAS_CB_FNS) {
1173 unsigned long fn_st_addr; /* Cardbus function status space */
1176 fn_st_addr = pci_resource_start (pdev, 2);
1178 vp->cb_fn_base = ioremap(fn_st_addr, 128);
1180 if (!vp->cb_fn_base)
1183 printk(KERN_INFO "%s: CardBus functions mapped %8.8lx->%p\n",
1184 print_name, fn_st_addr, vp->cb_fn_base);
1187 n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
1188 if (vp->drv_flags & INVERT_LED_PWR)
1190 if (vp->drv_flags & INVERT_MII_PWR)
1192 outw(n, ioaddr + Wn2_ResetOptions);
1195 /* Extract our information from the EEPROM data. */
1196 vp->info1 = eeprom[13];
1197 vp->info2 = eeprom[15];
1198 vp->capabilities = eeprom[16];
1200 if (vp->info1 & 0x8000) {
1201 vp->full_duplex = 1;
1202 printk(KERN_INFO "Full duplex capable\n");
1206 static const char * ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
1207 unsigned int config;
1209 vp->available_media = inw(ioaddr + Wn3_Options);
1210 if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */
1211 vp->available_media = 0x40;
1212 config = inl(ioaddr + Wn3_Config);
1213 printk(KERN_DEBUG " Internal config register is %4.4x, "
1214 "transceivers %#x.\n", config,
1215 inw(ioaddr + Wn3_Options));
1216 printk(KERN_INFO " %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
1217 8 << RAM_SIZE(config),
1218 RAM_WIDTH(config) ? "word" : "byte",
1219 ram_split[RAM_SPLIT(config)],
1220 AUTOSELECT(config) ? "autoselect/" : "",
1221 XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
1222 media_tbl[XCVR(config)].name);
1223 vp->default_media = XCVR(config);
1224 if (vp->default_media == XCVR_NWAY)
1226 vp->autoselect = AUTOSELECT(config);
1229 if (vp->media_override != 7) {
1230 printk(KERN_INFO "%s: Media override to transceiver type %d (%s).\n",
1231 print_name, vp->media_override,
1232 media_tbl[vp->media_override].name);
1233 dev->if_port = vp->media_override;
1235 dev->if_port = vp->default_media;
/* Scan MDIO addresses for an MII PHY; stop at the first responder. */
1237 if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
1238 int phy, phy_idx = 0;
1240 mii_preamble_required++;
1241 mii_preamble_required++;
1242 mdio_read(dev, 24, 1);
1243 for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
1244 int mii_status, phyx;
1247 * For the 3c905CX we look at index 24 first, because it bogusly
1248 * reports an external PHY at all indices
1256 mii_status = mdio_read(dev, phyx, 1);
1257 if (mii_status && mii_status != 0xffff) {
1258 vp->phys[phy_idx++] = phyx;
1259 printk(KERN_INFO " MII transceiver found at "
1260 "address %d, status %4x.\n",
1262 if ((mii_status & 0x0040) == 0)
1263 mii_preamble_required++;
1266 mii_preamble_required--;
1268 printk(KERN_WARNING" ***WARNING*** No MII transceivers found!\n");
/* Read the PHY's advertisement register (MII reg 4). */
1271 vp->advertising = mdio_read(dev, vp->phys[0], 4);
1272 if (vp->full_duplex) {
1273 /* Only advertise the FD media types. */
1274 vp->advertising &= ~0x02A0;
1275 mdio_write(dev, vp->phys[0], 4, vp->advertising);
1280 if (vp->capabilities & CapBusMaster) {
1281 vp->full_bus_master_tx = 1;
1282 printk(KERN_INFO " Enabling bus-master transmits and %s "
1284 (vp->info2 & 1) ? "early" : "whole-frame" );
1285 vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
1286 vp->bus_master = 0; /* AKPM: vortex only */
1289 /* The 3c59x-specific entries in the device structure. */
1290 dev->open = vortex_open;
1291 if (vp->full_bus_master_tx) {
1292 dev->hard_start_xmit = boomerang_start_xmit;
1293 /* Actually, it still should work with iommu. */
1294 dev->features |= NETIF_F_SG;
1295 if (((hw_checksums[card_idx] == -1) && (vp->drv_flags & HAS_HWCKSM)) ||
1296 (hw_checksums[card_idx] == 1)) {
1297 dev->features |= NETIF_F_IP_CSUM;
1300 dev->hard_start_xmit = vortex_start_xmit;
1303 printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n",
1305 (dev->features & NETIF_F_SG) ? "en":"dis",
1306 (dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
1308 dev->stop = vortex_close;
1309 dev->get_stats = vortex_get_stats;
1310 dev->do_ioctl = vortex_ioctl;
1311 dev->set_multicast_list = set_rx_mode;
1312 dev->tx_timeout = vortex_tx_timeout;
1313 dev->watchdog_timeo = (watchdog * HZ) / 1000;
1314 if (pdev && vp->enable_wol) {
1315 vp->pm_state_valid = 1;
1316 pci_save_state(vp->pdev, vp->power_state);
1319 retval = register_netdev(dev);
/* Error path: undo the DMA ring allocation and I/O region claim. */
1324 pci_free_consistent(pdev,
1325 sizeof(struct boom_rx_desc) * RX_RING_SIZE
1326 + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1330 if (vp->must_free_region)
1331 release_region(ioaddr, vci->io_size);
1333 printk(KERN_ERR PFX "vortex_probe1 fails. Returns %d\n", retval);
/* Issue a command to the EL3_CMD register and busy-wait for CmdInProgress
 * to clear: first a fast ~2000-iteration spin, then a slower loop (with a
 * diagnostic printk) before declaring the command stuck. */
1339 issue_and_wait(struct net_device *dev, int cmd)
1343 outw(cmd, dev->base_addr + EL3_CMD);
1344 for (i = 0; i < 2000; i++) {
1345 if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress))
1349 /* OK, that didn't work. Do it the slow way. One second */
1350 for (i = 0; i < 100000; i++) {
1351 if (!(inw(dev->base_addr + EL3_STATUS) & CmdInProgress)) {
1352 if (vortex_debug > 1)
1353 printk(KERN_INFO "%s: command 0x%04x took %d usecs\n",
1354 dev->name, cmd, i * 10);
1359 printk(KERN_ERR "%s: command 0x%04x did not complete! Status=0x%x\n",
1360 dev->name, cmd, inw(dev->base_addr + EL3_STATUS));
/* Bring the interface fully up: restore PCI power state, select the active
 * media port, program duplex/MAC control, reset Tx/Rx, set the station
 * address, start the media timer, and enable interrupts and the queues.
 * NOTE(review): several interior lines (EL3WINDOW selects, declarations) are
 * missing from this extract -- comments describe only what is visible. */
1364 vortex_up(struct net_device *dev)
1366 long ioaddr = dev->base_addr;
1367 struct vortex_private *vp = (struct vortex_private *)dev->priv;
1368 unsigned int config;
1371 if (vp->pdev && vp->enable_wol) {
1372 pci_set_power_state(vp->pdev, 0); /* Go active */
1373 pci_restore_state(vp->pdev, vp->power_state);
1376 /* Before initializing select the active media port. */
1378 config = inl(ioaddr + Wn3_Config);
1380 if (vp->media_override != 7) {
1381 printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
1382 dev->name, vp->media_override,
1383 media_tbl[vp->media_override].name);
1384 dev->if_port = vp->media_override;
1385 } else if (vp->autoselect) {
1387 if (vortex_debug > 1)
1388 printk(KERN_INFO "%s: using NWAY device table, not %d\n",
1389 dev->name, dev->if_port);
1390 dev->if_port = XCVR_NWAY;
1392 /* Find first available media type, starting with 100baseTx. */
1393 dev->if_port = XCVR_100baseTx;
1394 while (! (vp->available_media & media_tbl[dev->if_port].mask))
1395 dev->if_port = media_tbl[dev->if_port].next;
1396 if (vortex_debug > 1)
1397 printk(KERN_INFO "%s: first available media type: %s\n",
1398 dev->name, media_tbl[dev->if_port].name);
1401 dev->if_port = vp->default_media;
1402 if (vortex_debug > 1)
1403 printk(KERN_INFO "%s: using default media %s\n",
1404 dev->name, media_tbl[dev->if_port].name);
/* Arm the media-selection timer to fire after this media's wait period. */
1407 init_timer(&vp->timer);
1408 vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
1409 vp->timer.data = (unsigned long)dev;
1410 vp->timer.function = vortex_timer; /* timer handler */
1411 add_timer(&vp->timer);
/* Rx out-of-memory retry timer; armed on demand, not here. */
1413 init_timer(&vp->rx_oom_timer);
1414 vp->rx_oom_timer.data = (unsigned long)dev;
1415 vp->rx_oom_timer.function = rx_oom_timer;
1417 if (vortex_debug > 1)
1418 printk(KERN_DEBUG "%s: Initial media type %s.\n",
1419 dev->name, media_tbl[dev->if_port].name);
1421 vp->full_duplex = vp->force_fd;
/* Stuff the chosen port into bits 20-23 of InternalConfig. */
1422 config = BFINS(config, dev->if_port, 20, 4);
1423 if (vortex_debug > 6)
1424 printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
1425 outl(config, ioaddr + Wn3_Config);
1427 if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
1428 int mii_reg1, mii_reg5;
1430 /* Read BMSR (reg1) only to clear old status. */
1431 mii_reg1 = mdio_read(dev, vp->phys[0], 1);
1432 mii_reg5 = mdio_read(dev, vp->phys[0], 5);
1433 if (mii_reg5 == 0xffff || mii_reg5 == 0x0000) {
1434 netif_carrier_off(dev); /* No MII device or no link partner report */
1436 if ((mii_reg5 & 0x0100) != 0 /* 100baseTx-FD */
1437 || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */
1438 vp->full_duplex = 1;
1439 netif_carrier_on(dev);
1441 vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
1442 if (vortex_debug > 1)
1443 printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
1444 " info1 %04x, setting %s-duplex.\n",
1445 dev->name, vp->phys[0],
1447 vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half");
1451 /* Set the full-duplex bit. */
1452 outw( ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1453 (dev->mtu > 1500 ? 0x40 : 0) |
1454 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
1455 ioaddr + Wn3_MAC_Ctrl);
1457 if (vortex_debug > 1) {
1458 printk(KERN_DEBUG "%s: vortex_up() InternalConfig %8.8x.\n",
1462 issue_and_wait(dev, TxReset);
1464 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
1466 issue_and_wait(dev, RxReset|0x04);
1468 outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
1470 if (vortex_debug > 1) {
1472 printk(KERN_DEBUG "%s: vortex_up() irq %d media status %4.4x.\n",
1473 dev->name, dev->irq, inw(ioaddr + Wn4_Media));
1476 /* Set the station address and mask in window 2 each time opened. */
1478 for (i = 0; i < 6; i++)
1479 outb(dev->dev_addr[i], ioaddr + i);
1480 for (; i < 12; i+=2)
1481 outw(0, ioaddr + i);
1483 if (vp->cb_fn_base) {
1484 unsigned short n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
1485 if (vp->drv_flags & INVERT_LED_PWR)
1487 if (vp->drv_flags & INVERT_MII_PWR)
1489 outw(n, ioaddr + Wn2_ResetOptions);
1492 if (dev->if_port == XCVR_10base2)
1493 /* Start the thinnet transceiver. We should really wait 50ms...*/
1494 outw(StartCoax, ioaddr + EL3_CMD);
1495 if (dev->if_port != XCVR_NWAY) {
1497 outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
1498 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
1501 /* Switch to the stats window, and clear all stats by reading. */
1502 outw(StatsDisable, ioaddr + EL3_CMD);
1504 for (i = 0; i < 10; i++)
1508 /* New: On the Vortex we must also clear the BadSSD counter. */
1511 /* ..and on the Boomerang we enable the extra statistics bits. */
1512 outw(0x0040, ioaddr + Wn4_NetDiag);
1514 /* Switch to register set 7 for normal use. */
1517 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1518 vp->cur_rx = vp->dirty_rx = 0;
1519 /* Initialize the RxEarly register as recommended. */
1520 outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
1521 outl(0x0020, ioaddr + PktStatus);
1522 outl(vp->rx_ring_dma, ioaddr + UpListPtr);
1524 if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
1525 vp->cur_tx = vp->dirty_tx = 0;
1526 if (vp->drv_flags & IS_BOOMERANG)
1527 outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
1528 /* Clear the Rx, Tx rings. */
1529 for (i = 0; i < RX_RING_SIZE; i++) /* AKPM: this is done in vortex_open, too */
1530 vp->rx_ring[i].status = 0;
1531 for (i = 0; i < TX_RING_SIZE; i++)
1532 vp->tx_skbuff[i] = 0;
1533 outl(0, ioaddr + DownListPtr);
1535 /* Set receiver mode: presumably accept b-case and phys addr only. */
1537 outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
1539 // issue_and_wait(dev, SetTxStart|0x07ff);
1540 outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
1541 outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
1542 /* Allow status bits to be seen. */
1543 vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
1544 (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
1545 (vp->full_bus_master_rx ? UpComplete : RxComplete) |
1546 (vp->bus_master ? DMADone : 0);
1547 vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
1548 (vp->full_bus_master_rx ? 0 : RxComplete) |
1549 StatsFull | HostError | TxComplete | IntReq
1550 | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
1551 outw(vp->status_enable, ioaddr + EL3_CMD);
1552 /* Ack all pending events, and set active indicator mask. */
1553 outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
1555 outw(vp->intr_enable, ioaddr + EL3_CMD);
1556 if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
1557 writel(0x8000, vp->cb_fn_base + 4);
1558 netif_start_queue (dev);
/* net_device open callback: request the (shared) IRQ, fill the Boomerang Rx
 * ring with freshly-allocated skbs, then finish bring-up.  On partial ring
 * allocation the already-allocated skbs are freed and the IRQ released.
 * NOTE(review): the vortex_up() call and final return are among the lines
 * missing from this extract. */
1562 vortex_open(struct net_device *dev)
1564 struct vortex_private *vp = (struct vortex_private *)dev->priv;
1568 /* Use the now-standard shared IRQ implementation. */
1569 if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
1570 &boomerang_interrupt : &vortex_interrupt, SA_SHIRQ, dev->name, dev))) {
1571 printk(KERN_ERR "%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
1575 if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1576 if (vortex_debug > 2)
1577 printk(KERN_DEBUG "%s: Filling in the Rx ring.\n", dev->name);
1578 for (i = 0; i < RX_RING_SIZE; i++) {
1579 struct sk_buff *skb;
1580 vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
1581 vp->rx_ring[i].status = 0; /* Clear complete bit. */
1582 vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
1583 skb = dev_alloc_skb(PKT_BUF_SZ);
1584 vp->rx_skbuff[i] = skb;
1586 break; /* Bad news! */
1587 skb->dev = dev; /* Mark as being used by this device. */
1588 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1589 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1591 if (i != RX_RING_SIZE) {
/* Allocation failed part-way: free what we did allocate. */
1593 printk(KERN_EMERG "%s: no memory for rx ring\n", dev->name);
1594 for (j = 0; j < i; j++) {
1595 if (vp->rx_skbuff[j]) {
1596 dev_kfree_skb(vp->rx_skbuff[j]);
1597 vp->rx_skbuff[j] = 0;
1603 /* Wrap the ring. */
1604 vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
1611 free_irq(dev->irq, dev);
1613 if (vortex_debug > 1)
1614 printk(KERN_ERR "%s: vortex_open() fails: returning %d\n", dev->name, retval);
/* Periodic media-selection timer.  Checks link on the current port
 * (link-beat bits for 10/100baseT/Fx, MII BMSR for MII/NWAY); on MII link it
 * may renegotiate duplex and reprogram Wn3_MAC_Ctrl.  If the media has no
 * link it steps to the next candidate in media_tbl[].  Reschedules itself
 * via mod_timer().  NOTE(review): interior lines (window selects, the
 * media-stepping branch condition, labels) are missing from this extract. */
1619 vortex_timer(unsigned long data)
1621 struct net_device *dev = (struct net_device *)data;
1622 struct vortex_private *vp = (struct vortex_private *)dev->priv;
1623 long ioaddr = dev->base_addr;
1624 int next_tick = 60*HZ;
1626 int media_status, mii_status, old_window;
1628 if (vortex_debug > 2) {
1629 printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n",
1630 dev->name, media_tbl[dev->if_port].name);
1631 printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
1635 goto leave_media_alone;
/* Block the device IRQ while we poke at the shared register windows. */
1636 disable_irq(dev->irq);
1637 old_window = inw(ioaddr + EL3_CMD) >> 13;
1639 media_status = inw(ioaddr + Wn4_Media);
1640 switch (dev->if_port) {
1641 case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx:
1642 if (media_status & Media_LnkBeat) {
1643 netif_carrier_on(dev);
1645 if (vortex_debug > 1)
1646 printk(KERN_DEBUG "%s: Media %s has link beat, %x.\n",
1647 dev->name, media_tbl[dev->if_port].name, media_status);
1648 } else if (vortex_debug > 1) {
1649 netif_carrier_off(dev);
1650 printk(KERN_DEBUG "%s: Media %s has no link beat, %x.\n",
1651 dev->name, media_tbl[dev->if_port].name, media_status);
1654 case XCVR_MII: case XCVR_NWAY:
1656 mii_status = mdio_read(dev, vp->phys[0], 1);
1658 if (vortex_debug > 2)
1659 printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n",
1660 dev->name, mii_status);
1661 if (mii_status & BMSR_LSTATUS) {
1662 int mii_reg5 = mdio_read(dev, vp->phys[0], 5);
1663 if (! vp->force_fd && mii_reg5 != 0xffff) {
1664 int duplex = (mii_reg5&0x0100) ||
1665 (mii_reg5 & 0x01C0) == 0x0040;
1666 if (vp->full_duplex != duplex) {
1667 vp->full_duplex = duplex;
1668 printk(KERN_INFO "%s: Setting %s-duplex based on MII "
1669 "#%d link partner capability of %4.4x.\n",
1670 dev->name, vp->full_duplex ? "full" : "half",
1671 vp->phys[0], mii_reg5);
1672 /* Set the full-duplex bit. */
1674 outw( (vp->full_duplex ? 0x20 : 0) |
1675 (dev->mtu > 1500 ? 0x40 : 0) |
1676 ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
1677 ioaddr + Wn3_MAC_Ctrl);
1678 if (vortex_debug > 1)
1679 printk(KERN_DEBUG "Setting duplex in Wn3_MAC_Ctrl\n");
1680 /* AKPM: bug: should reset Tx and Rx after setting Duplex. Page 180 */
1683 netif_carrier_on(dev);
1685 netif_carrier_off(dev);
1689 default: /* Other media types handled by Tx timeouts. */
1690 if (vortex_debug > 1)
1691 printk(KERN_DEBUG "%s: Media %s has no indication, %x.\n",
1692 dev->name, media_tbl[dev->if_port].name, media_status);
/* No link: step through media_tbl[] to the next present media type. */
1696 unsigned int config;
1699 dev->if_port = media_tbl[dev->if_port].next;
1700 } while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
1701 if (dev->if_port == XCVR_Default) { /* Go back to default. */
1702 dev->if_port = vp->default_media;
1703 if (vortex_debug > 1)
1704 printk(KERN_DEBUG "%s: Media selection failing, using default "
1706 dev->name, media_tbl[dev->if_port].name);
1708 if (vortex_debug > 1)
1709 printk(KERN_DEBUG "%s: Media selection failed, now trying "
1711 dev->name, media_tbl[dev->if_port].name);
1712 next_tick = media_tbl[dev->if_port].wait;
1714 outw((media_status & ~(Media_10TP|Media_SQE)) |
1715 media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
1718 config = inl(ioaddr + Wn3_Config);
1719 config = BFINS(config, dev->if_port, 20, 4);
1720 outl(config, ioaddr + Wn3_Config);
1722 outw(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
1724 if (vortex_debug > 1)
1725 printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config);
1726 /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */
1728 EL3WINDOW(old_window);
1729 enable_irq(dev->irq);
1732 if (vortex_debug > 2)
1733 printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
1734 dev->name, media_tbl[dev->if_port].name);
1736 mod_timer(&vp->timer, RUN_AT(next_tick));
/* If events were deferred in the ISR, trigger a fake interrupt to drain them. */
1738 outw(FakeIntr, ioaddr + EL3_CMD);
/* Watchdog callback for a stalled transmitter.  Dumps diagnostic state,
 * drains any blocked interrupts by calling the ISR directly, resets the Tx
 * engine, restarts the Tx ring (Boomerang) or drops the packet (Vortex),
 * and re-enables the transmitter. */
1742 static void vortex_tx_timeout(struct net_device *dev)
1744 struct vortex_private *vp = (struct vortex_private *)dev->priv;
1745 long ioaddr = dev->base_addr;
1747 printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
1748 dev->name, inb(ioaddr + TxStatus),
1749 inw(ioaddr + EL3_STATUS));
1751 printk(KERN_ERR " diagnostics: net %04x media %04x dma %8.8x.\n",
1752 inw(ioaddr + Wn4_NetDiag), inw(ioaddr + Wn4_Media),
1753 inl(ioaddr + PktStatus));
1754 /* Slight code bloat to be user friendly. */
1755 if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
1756 printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
1757 " network cable problem?\n", dev->name);
1758 if (inw(ioaddr + EL3_STATUS) & IntLatch) {
1759 printk(KERN_ERR "%s: Interrupt posted but not delivered --"
1760 " IRQ blocked by another device?\n", dev->name);
1761 /* Bad idea here.. but we might as well handle a few events. */
1764 * Block interrupts because vortex_interrupt does a bare spin_lock()
1766 unsigned long flags;
1767 local_irq_save(flags);
1768 if (vp->full_bus_master_tx)
1769 boomerang_interrupt(dev->irq, dev, 0);
1771 vortex_interrupt(dev->irq, dev, 0);
1772 local_irq_restore(flags);
1776 if (vortex_debug > 0)
1779 issue_and_wait(dev, TxReset);
1781 vp->stats.tx_errors++;
1782 if (vp->full_bus_master_tx) {
1783 printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name);
/* Re-point the download engine at the first un-acked descriptor. */
1784 if (vp->cur_tx - vp->dirty_tx > 0 && inl(ioaddr + DownListPtr) == 0)
1785 outl(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
1786 ioaddr + DownListPtr);
1787 if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
1788 netif_wake_queue (dev);
1789 if (vp->drv_flags & IS_BOOMERANG)
1790 outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
1791 outw(DownUnstall, ioaddr + EL3_CMD);
1793 vp->stats.tx_dropped++;
1794 netif_wake_queue(dev);
1797 /* Issue Tx Enable */
1798 outw(TxEnable, ioaddr + EL3_CMD);
1799 dev->trans_start = jiffies;
1801 /* Switch to register set 7 for normal use. */
1806 * Handle uncommon interrupt sources. This is a separate routine to minimize
/* Dispatches on the status bits: Tx errors (TxComplete), RxEarly, StatsFull,
 * IntReq, and HostError (PCI bus faults / FIFO problems), resetting the
 * appropriate engine when needed.  NOTE(review): interior lines (window
 * selects, some branch bodies) are missing from this extract. */
1810 vortex_error(struct net_device *dev, int status)
1812 struct vortex_private *vp = (struct vortex_private *)dev->priv;
1813 long ioaddr = dev->base_addr;
1814 int do_tx_reset = 0, reset_mask = 0;
1815 unsigned char tx_status = 0;
1817 if (vortex_debug > 2) {
1818 printk(KERN_ERR "%s: vortex_error(), status=0x%x\n", dev->name, status);
1821 if (status & TxComplete) { /* Really "TxError" for us. */
1822 tx_status = inb(ioaddr + TxStatus);
1823 /* Presumably a tx-timeout. We must merely re-enable. */
1824 if (vortex_debug > 2
1825 || (tx_status != 0x88 && vortex_debug > 0)) {
1826 printk(KERN_ERR "%s: Transmit error, Tx status register %2.2x.\n",
1827 dev->name, tx_status);
1828 if (tx_status == 0x82) {
1829 printk(KERN_ERR "Probably a duplex mismatch. See "
1830 "Documentation/networking/vortex.txt\n");
1834 if (tx_status & 0x14) vp->stats.tx_fifo_errors++;
1835 if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
1836 outb(0, ioaddr + TxStatus);
1837 if (tx_status & 0x30) { /* txJabber or txUnderrun */
1839 } else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */
1841 reset_mask = 0x0108; /* Reset interface logic, but not download logic */
1842 } else { /* Merely re-enable the transmitter. */
1843 outw(TxEnable, ioaddr + EL3_CMD);
1847 if (status & RxEarly) { /* Rx early is unused. */
1849 outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
1851 if (status & StatsFull) { /* Empty statistics. */
1852 static int DoneDidThat;
1853 if (vortex_debug > 4)
1854 printk(KERN_DEBUG "%s: Updating stats.\n", dev->name);
1855 update_stats(ioaddr, dev);
1856 /* HACK: Disable statistics as an interrupt source. */
1857 /* This occurs when we have the wrong media type! */
1858 if (DoneDidThat == 0 &&
1859 inw(ioaddr + EL3_STATUS) & StatsFull) {
1860 printk(KERN_WARNING "%s: Updating statistics failed, disabling "
1861 "stats as an interrupt source.\n", dev->name);
1863 outw(SetIntrEnb | (inw(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
1864 vp->intr_enable &= ~StatsFull;
1869 if (status & IntReq) { /* Restore all interrupt sources. */
1870 outw(vp->status_enable, ioaddr + EL3_CMD);
1871 outw(vp->intr_enable, ioaddr + EL3_CMD);
1873 if (status & HostError) {
1876 fifo_diag = inw(ioaddr + Wn4_FIFODiag);
1877 printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n",
1878 dev->name, fifo_diag);
1879 /* Adapter failure requires Tx/Rx reset and reinit. */
1880 if (vp->full_bus_master_tx) {
1881 int bus_status = inl(ioaddr + PktStatus);
1882 /* 0x80000000 PCI master abort. */
1883 /* 0x40000000 PCI target abort. */
1885 printk(KERN_ERR "%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);
1887 /* In this case, blow the card away */
1889 issue_and_wait(dev, TotalReset | 0xff);
1890 vortex_up(dev); /* AKPM: bug. vortex_up() assumes that the rx ring is full. It may not be. */
1891 } else if (fifo_diag & 0x0400)
1893 if (fifo_diag & 0x3000) {
1894 /* Reset Rx fifo and upload logic */
1895 issue_and_wait(dev, RxReset|0x07);
1896 /* Set the Rx filter to the current state. */
1898 outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
1899 outw(AckIntr | HostError, ioaddr + EL3_CMD);
/* Deferred Tx reset requested by the TxComplete branch above. */
1904 issue_and_wait(dev, TxReset|reset_mask);
1905 outw(TxEnable, ioaddr + EL3_CMD);
1906 if (!vp->full_bus_master_tx)
1907 netif_wake_queue(dev);
/*
 * vortex_start_xmit - transmit one skb on a PIO/bus-master "Vortex" chip.
 * Writes the doubleword length header, then either programs the single
 * bus-master DMA channel (Wn7_Master*) or pushes the packet into the
 * Tx FIFO with outsl().  Stops the queue and arms SetTxThreshold when
 * the FIFO lacks room for a max-sized (1536 byte) packet.
 * NOTE(review): several original lines (braces, local declarations such
 * as tx_status/i, return) are absent from this excerpt.
 */
1912 vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
1914 struct vortex_private *vp = (struct vortex_private *)dev->priv;
1915 long ioaddr = dev->base_addr;
1917 /* Put out the doubleword header... */
1918 outl(skb->len, ioaddr + TX_FIFO);
1919 if (vp->bus_master) {
1920 /* Set the bus-master controller to transfer the packet. */
/* Length is rounded up to a doubleword for the DMA engine. */
1921 int len = (skb->len + 3) & ~3;
1922 outl( vp->tx_skb_dma = pci_map_single(vp->pdev, skb->data, len, PCI_DMA_TODEVICE),
1923 ioaddr + Wn7_MasterAddr);
1924 outw(len, ioaddr + Wn7_MasterLen);
1926 outw(StartDMADown, ioaddr + EL3_CMD);
1927 /* netif_wake_queue() will be called at the DMADone interrupt. */
1929 /* ... and the packet rounded to a doubleword. */
1930 outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
1931 dev_kfree_skb (skb);
1932 if (inw(ioaddr + TxFree) > 1536) {
1933 netif_start_queue (dev); /* AKPM: redundant? */
1935 /* Interrupt us when the FIFO has room for max-sized packet. */
1936 netif_stop_queue(dev);
1937 outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
1941 dev->trans_start = jiffies;
1943 /* Clear the Tx status stack. */
/* Pop and handle any stacked Tx completion statuses (bounded by i). */
1948 while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
1949 if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
1950 if (vortex_debug > 2)
1951 printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
1952 dev->name, tx_status);
1953 if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
1954 if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
1955 if (tx_status & 0x30) {
/* Jabber/underrun: the transmitter must be reset before re-enable. */
1956 issue_and_wait(dev, TxReset);
1958 outw(TxEnable, ioaddr + EL3_CMD);
1960 outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
/*
 * boomerang_start_xmit - queue one skb on the descriptor-based Tx ring.
 * Builds a boom_tx_desc (scatter-gather frags when DO_ZEROCOPY paths are
 * compiled in), links it after prev_entry under DownStall, and restarts
 * the download engine.  Interface: standard hard_start_xmit.
 * NOTE(review): this excerpt is missing some original lines (braces,
 * local i declaration, #if/#else markers, return statement).
 */
1967 boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
1969 struct vortex_private *vp = (struct vortex_private *)dev->priv;
1970 long ioaddr = dev->base_addr;
1971 /* Calculate the next Tx descriptor entry. */
1972 int entry = vp->cur_tx % TX_RING_SIZE;
/* Previous descriptor: its ->next is patched to chain in this one. */
1973 struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
1974 unsigned long flags;
1976 if (vortex_debug > 6) {
1977 printk(KERN_DEBUG "boomerang_start_xmit()\n");
1978 if (vortex_debug > 3)
1979 printk(KERN_DEBUG "%s: Trying to send a packet, Tx index %d.\n",
1980 dev->name, vp->cur_tx);
/* Ring full should not happen: the queue is stopped before this point. */
1983 if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
1984 if (vortex_debug > 0)
1985 printk(KERN_WARNING "%s: BUG! Tx Ring full, refusing to send buffer.\n",
1987 netif_stop_queue(dev);
1991 vp->tx_skbuff[entry] = skb;
1993 vp->tx_ring[entry].next = 0;
/* Request hardware TCP checksum insertion when the stack asked for it. */
1995 if (skb->ip_summed != CHECKSUM_HW)
1996 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
1998 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum);
2000 if (!skb_shinfo(skb)->nr_frags) {
/* Linear skb: single fragment covers the whole packet. */
2001 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->data,
2002 skb->len, PCI_DMA_TODEVICE));
2003 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
/* Nonlinear skb: frag[0] is the header, frag[1..n] map the pages. */
2007 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->data,
2008 skb->len-skb->data_len, PCI_DMA_TODEVICE));
2009 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len);
2011 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2012 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2014 vp->tx_ring[entry].frag[i+1].addr =
2015 cpu_to_le32(pci_map_single(vp->pdev,
2016 (void*)page_address(frag->page) + frag->page_offset,
2017 frag->size, PCI_DMA_TODEVICE));
2019 if (i == skb_shinfo(skb)->nr_frags-1)
2020 vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
2022 vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
/* Non-zerocopy build: flat addr/length descriptor layout. */
2026 vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->data, skb->len, PCI_DMA_TODEVICE));
2027 vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
2028 vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2031 spin_lock_irqsave(&vp->lock, flags);
2032 /* Wait for the stall to complete. */
2033 issue_and_wait(dev, DownStall);
2034 prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
/* Engine idle (DownListPtr == 0): point it at the new descriptor. */
2035 if (inl(ioaddr + DownListPtr) == 0) {
2036 outl(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
2037 vp->queued_packet++;
2041 if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
2042 netif_stop_queue (dev);
2043 } else { /* Clear previous interrupt enable. */
2044 #if defined(tx_interrupt_mitigation)
2045 /* Dubious. If in boomerang_interrupt "faster" cyclone ifdef
2046 * were selected, this would corrupt DN_COMPLETE. No?
2048 prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
2051 outw(DownUnstall, ioaddr + EL3_CMD);
2052 spin_unlock_irqrestore(&vp->lock, flags);
2053 dev->trans_start = jiffies;
2057 /* The interrupt handler does all of the Rx thread work and cleans up
2058 after the Tx thread. */
2061 * This is the ISR for the vortex series chips.
2062 * full_bus_master_tx == 0 && full_bus_master_rx == 0
/*
 * vortex_interrupt - ISR for Vortex-generation chips (no bus-master rings).
 * Loops while IntLatch (and RxComplete) are set, dispatching Rx, FIFO-room
 * (TxAvailable), single-channel DMA completion (DMADone), and the uncommon
 * conditions to vortex_error().  After max_interrupt_work iterations it
 * masks the offending sources into vp->deferred and lets the timer
 * re-enable them.  Runs under vp->lock.
 * NOTE(review): excerpt is missing some original lines (braces, the
 * ioaddr/status declarations, do { openers, handler_exit label).
 */
2065 static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2067 struct net_device *dev = dev_id;
2068 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2071 int work_done = max_interrupt_work;
2073 ioaddr = dev->base_addr;
2074 spin_lock(&vp->lock);
2076 status = inw(ioaddr + EL3_STATUS);
2078 if (vortex_debug > 6)
2079 printk("vortex_interrupt(). status=0x%4x\n", status);
2081 if ((status & IntLatch) == 0)
2082 goto handler_exit; /* No interrupt: shared IRQs cause this */
2084 if (status & IntReq) {
/* Fold in sources previously masked by the too-much-work path. */
2085 status |= vp->deferred;
2089 if (status == 0xffff) /* h/w no longer present (hotplug)? */
2092 if (vortex_debug > 4)
2093 printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
2094 dev->name, status, inb(ioaddr + Timer));
2097 if (vortex_debug > 5)
2098 printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
2100 if (status & RxComplete)
2103 if (status & TxAvailable) {
2104 if (vortex_debug > 5)
2105 printk(KERN_DEBUG " TX room bit was handled.\n");
2106 /* There's room in the FIFO for a full-sized packet. */
2107 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
2108 netif_wake_queue (dev);
2111 if (status & DMADone) {
2112 if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) {
2113 outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
/* Undo the mapping made in vortex_start_xmit (length rounded to 4). */
2114 pci_unmap_single(vp->pdev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
2115 dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
2116 if (inw(ioaddr + TxFree) > 1536) {
2118 * AKPM: FIXME: I don't think we need this. If the queue was stopped due to
2119 * insufficient FIFO room, the TxAvailable test will succeed and call
2120 * netif_wake_queue()
2122 netif_wake_queue(dev);
2123 } else { /* Interrupt when FIFO has room for max-sized packet. */
2124 outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
2125 netif_stop_queue(dev);
2129 /* Check for all uncommon interrupts at once. */
2130 if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
2131 if (status == 0xffff)
2133 vortex_error(dev, status);
2136 if (--work_done < 0) {
2137 printk(KERN_WARNING "%s: Too much work in interrupt, status "
2138 "%4.4x.\n", dev->name, status);
2139 /* Disable all pending interrupts. */
2141 vp->deferred |= status;
2142 outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
2144 outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
/* NOTE(review): reads EL3_CMD — presumably EL3_CMD and EL3_STATUS share
 * the same register offset on these chips; confirm against the header. */
2145 } while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
2146 /* The timer will reenable interrupts. */
2147 mod_timer(&vp->timer, jiffies + 1*HZ);
2150 /* Acknowledge the IRQ. */
2151 outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
2152 } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
2154 if (vortex_debug > 4)
2155 printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
2158 spin_unlock(&vp->lock);
2162 * This is the ISR for the boomerang series chips.
2163 * full_bus_master_tx == 1 && full_bus_master_rx == 1
/*
 * boomerang_interrupt - ISR for Boomerang/Cyclone chips with full
 * bus-master Tx and Rx rings.  Handles UpComplete (Rx ring), DownComplete
 * (reclaims completed Tx descriptors, unmaps DMA, frees skbs, wakes the
 * queue), and defers excess sources exactly like vortex_interrupt.
 * Runs under vp->lock to serialize against boomerang_start_xmit and
 * vortex_tx_timeout.
 * NOTE(review): excerpt is missing some original lines (declarations,
 * braces, #if/#else markers, handler_exit label).
 */
2166 static void boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2168 struct net_device *dev = dev_id;
2169 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2172 int work_done = max_interrupt_work;
2174 ioaddr = dev->base_addr;
2177 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
2178 * and boomerang_start_xmit
2180 spin_lock(&vp->lock);
2182 status = inw(ioaddr + EL3_STATUS);
2184 if (vortex_debug > 6)
2185 printk(KERN_DEBUG "boomerang_interrupt. status=0x%4x\n", status);
2187 if ((status & IntLatch) == 0)
2188 goto handler_exit; /* No interrupt: shared IRQs can cause this */
2190 if (status == 0xffff) { /* h/w no longer present (hotplug)? */
2191 if (vortex_debug > 1)
2192 printk(KERN_DEBUG "boomerang_interrupt(1): status = 0xffff\n");
2196 if (status & IntReq) {
/* Fold in sources masked earlier by the too-much-work path. */
2197 status |= vp->deferred;
2201 if (vortex_debug > 4)
2202 printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
2203 dev->name, status, inb(ioaddr + Timer));
2205 if (vortex_debug > 5)
2206 printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
2208 if (status & UpComplete) {
2209 outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
2210 if (vortex_debug > 5)
2211 printk(KERN_DEBUG "boomerang_interrupt->boomerang_rx\n");
2215 if (status & DownComplete) {
2216 unsigned int dirty_tx = vp->dirty_tx;
2218 outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
/* Reclaim every descriptor the download engine has finished with. */
2219 while (vp->cur_tx - dirty_tx > 0) {
2220 int entry = dirty_tx % TX_RING_SIZE;
2221 #if 1 /* AKPM: the latter is faster, but cyclone-only */
2222 if (inl(ioaddr + DownListPtr) ==
2223 vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
2224 break; /* It still hasn't been processed. */
2226 if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
2227 break; /* It still hasn't been processed. */
2230 if (vp->tx_skbuff[entry]) {
2231 struct sk_buff *skb = vp->tx_skbuff[entry];
/* <= is intentional: frag[0] maps the skb head, frags 1..n the pages. */
2234 for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
2235 pci_unmap_single(vp->pdev,
2236 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
2237 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
2240 pci_unmap_single(vp->pdev,
2241 le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
2243 dev_kfree_skb_irq(skb);
2244 vp->tx_skbuff[entry] = 0;
2246 printk(KERN_DEBUG "boomerang_interrupt: no skb!\n");
2248 /* vp->stats.tx_packets++; Counted below. */
2251 vp->dirty_tx = dirty_tx;
/* Ring has free slots again: restart the stopped queue. */
2252 if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
2253 if (vortex_debug > 6)
2254 printk(KERN_DEBUG "boomerang_interrupt: wake queue\n");
2255 netif_wake_queue (dev);
2259 /* Check for all uncommon interrupts at once. */
2260 if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
2261 vortex_error(dev, status);
2263 if (--work_done < 0) {
2264 printk(KERN_WARNING "%s: Too much work in interrupt, status "
2265 "%4.4x.\n", dev->name, status);
2266 /* Disable all pending interrupts. */
2268 vp->deferred |= status;
2269 outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
2271 outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
/* NOTE(review): reads EL3_CMD — presumably shares the status register
 * offset; confirm against the register definitions. */
2272 } while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
2273 /* The timer will reenable interrupts. */
2274 mod_timer(&vp->timer, jiffies + 1*HZ);
2277 /* Acknowledge the IRQ. */
2278 outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
2279 if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
2280 writel(0x8000, vp->cb_fn_base + 4);
2282 } while ((status = inw(ioaddr + EL3_STATUS)) & IntLatch);
2284 if (vortex_debug > 4)
2285 printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
2288 spin_unlock(&vp->lock);
/*
 * vortex_rx - PIO/single-DMA receive path for Vortex chips.
 * Pops packets from the Rx FIFO while RxStatus is positive: error frames
 * update the error counters; good frames are copied into a fresh skb
 * (via the Wn7 bus-master channel when idle, else insl) and passed to
 * the stack.  On allocation failure the packet is discarded.
 * NOTE(review): excerpt is missing some original lines (declarations of
 * i/rx_status, braces, netif_rx call, return).
 */
2291 static int vortex_rx(struct net_device *dev)
2293 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2294 long ioaddr = dev->base_addr;
2298 if (vortex_debug > 5)
2299 printk(KERN_DEBUG "vortex_rx(): status %4.4x, rx_status %4.4x.\n",
2300 inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
2301 while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
2302 if (rx_status & 0x4000) { /* Error, update stats. */
2303 unsigned char rx_error = inb(ioaddr + RxErrors);
2304 if (vortex_debug > 2)
2305 printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
2306 vp->stats.rx_errors++;
2307 if (rx_error & 0x01) vp->stats.rx_over_errors++;
2308 if (rx_error & 0x02) vp->stats.rx_length_errors++;
2309 if (rx_error & 0x04) vp->stats.rx_frame_errors++;
2310 if (rx_error & 0x08) vp->stats.rx_crc_errors++;
2311 if (rx_error & 0x10) vp->stats.rx_length_errors++;
2313 /* The packet length: up to 4.5K!. */
2314 int pkt_len = rx_status & 0x1fff;
2315 struct sk_buff *skb;
2317 skb = dev_alloc_skb(pkt_len + 5);
2318 if (vortex_debug > 4)
2319 printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
2320 pkt_len, rx_status);
2323 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
2324 /* 'skb_put()' points to the start of sk_buff data area. */
/* Use the single bus-master DMA channel only when it is idle. */
2325 if (vp->bus_master &&
2326 ! (inw(ioaddr + Wn7_MasterStatus) & 0x8000)) {
2327 dma_addr_t dma = pci_map_single(vp->pdev, skb_put(skb, pkt_len),
2328 pkt_len, PCI_DMA_FROMDEVICE);
2329 outl(dma, ioaddr + Wn7_MasterAddr);
2330 outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
2331 outw(StartDMAUp, ioaddr + EL3_CMD);
/* Busy-wait for the upload to finish before touching the data. */
2332 while (inw(ioaddr + Wn7_MasterStatus) & 0x8000)
2334 pci_unmap_single(vp->pdev, dma, pkt_len, PCI_DMA_FROMDEVICE);
2336 insl(ioaddr + RX_FIFO, skb_put(skb, pkt_len),
2337 (pkt_len + 3) >> 2);
2339 outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
2340 skb->protocol = eth_type_trans(skb, dev);
2342 dev->last_rx = jiffies;
2343 vp->stats.rx_packets++;
2344 /* Wait a limited time to go to next packet. */
2345 for (i = 200; i >= 0; i--)
2346 if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
2349 } else if (vortex_debug > 0)
2350 printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
2351 "size %d.\n", dev->name, pkt_len);
2353 vp->stats.rx_dropped++;
2354 issue_and_wait(dev, RxDiscard);
/*
 * boomerang_rx - bus-master Rx ring service routine.
 * Walks completed descriptors: error frames update counters; short good
 * frames (< rx_copybreak) are copied into a small skb while large frames
 * hand the ring skb straight to the stack.  Hardware IP/TCP/UDP checksum
 * results are honored.  Finally the ring is refilled; on total OOM the
 * rx_oom_timer is armed to retry.
 * NOTE(review): excerpt is missing some original lines (rx_status
 * declaration, braces, netif_rx call, return).
 */
2361 boomerang_rx(struct net_device *dev)
2363 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2364 int entry = vp->cur_rx % RX_RING_SIZE;
2365 long ioaddr = dev->base_addr;
/* Bound the work to the number of outstanding ring entries. */
2367 int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
2369 if (vortex_debug > 5)
2370 printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", inw(ioaddr+EL3_STATUS));
2372 while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
2373 if (--rx_work_limit < 0)
2375 if (rx_status & RxDError) { /* Error, update stats. */
2376 unsigned char rx_error = rx_status >> 16;
2377 if (vortex_debug > 2)
2378 printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
2379 vp->stats.rx_errors++;
2380 if (rx_error & 0x01) vp->stats.rx_over_errors++;
2381 if (rx_error & 0x02) vp->stats.rx_length_errors++;
2382 if (rx_error & 0x04) vp->stats.rx_frame_errors++;
2383 if (rx_error & 0x08) vp->stats.rx_crc_errors++;
2384 if (rx_error & 0x10) vp->stats.rx_length_errors++;
2386 /* The packet length: up to 4.5K!. */
2387 int pkt_len = rx_status & 0x1fff;
2388 struct sk_buff *skb;
2389 dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
2391 if (vortex_debug > 4)
2392 printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
2393 pkt_len, rx_status);
2395 /* Check if the packet is long enough to just accept without
2396 copying to a properly sized skbuff. */
2397 if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
2399 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
/* Make the DMA buffer visible to the CPU before copying from it. */
2400 pci_dma_sync_single(vp->pdev, dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2401 /* 'skb_put()' points to the start of sk_buff data area. */
2402 memcpy(skb_put(skb, pkt_len),
2403 vp->rx_skbuff[entry]->tail,
2407 /* Pass up the skbuff already on the Rx ring. */
2408 skb = vp->rx_skbuff[entry];
2409 vp->rx_skbuff[entry] = NULL;
2410 skb_put(skb, pkt_len);
2411 pci_unmap_single(vp->pdev, dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2414 skb->protocol = eth_type_trans(skb, dev);
2415 { /* Use hardware checksum info. */
2416 int csum_bits = rx_status & 0xee000000;
2418 (csum_bits == (IPChksumValid | TCPChksumValid) ||
2419 csum_bits == (IPChksumValid | UDPChksumValid))) {
2420 skb->ip_summed = CHECKSUM_UNNECESSARY;
2425 dev->last_rx = jiffies;
2426 vp->stats.rx_packets++;
2428 entry = (++vp->cur_rx) % RX_RING_SIZE;
2430 /* Refill the Rx ring buffers. */
2431 for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
2432 struct sk_buff *skb;
2433 entry = vp->dirty_rx % RX_RING_SIZE;
2434 if (vp->rx_skbuff[entry] == NULL) {
2435 skb = dev_alloc_skb(PKT_BUF_SZ);
2437 static unsigned long last_jif;
/* Rate-limit the OOM warning to once every 10 seconds. */
2438 if ((jiffies - last_jif) > 10 * HZ) {
2439 printk(KERN_WARNING "%s: memory shortage\n", dev->name);
2442 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
2443 mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
2444 break; /* Bad news! */
2446 skb->dev = dev; /* Mark as being used by this device. */
2447 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
2448 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(vp->pdev, skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
2449 vp->rx_skbuff[entry] = skb;
2451 vp->rx_ring[entry].status = 0; /* Clear complete bit. */
2452 outw(UpUnstall, ioaddr + EL3_CMD);
2458 * If we've hit a total OOM refilling the Rx ring we poll once a second
2459 * for some memory. Otherwise there is no way to restart the rx process.
/*
 * rx_oom_timer - retry refilling a completely empty Rx ring.
 * Armed by boomerang_rx() when every ring buffer allocation failed;
 * re-runs the refill path (in the missing lines) under vp->lock.
 */
2462 rx_oom_timer(unsigned long arg)
2464 struct net_device *dev = (struct net_device *)arg;
2465 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2467 spin_lock_irq(&vp->lock);
2468 if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
2470 if (vortex_debug > 1) {
2471 printk(KERN_DEBUG "%s: rx_oom_timer %s\n", dev->name,
2472 ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
2474 spin_unlock_irq(&vp->lock);
/*
 * vortex_down - quiesce the interface: stop the queue and timers,
 * disable stats/Rx/Tx, mask interrupts, take a final stats snapshot,
 * and clear the bus-master list pointers.  Saves PCI state when
 * wake-on-LAN is enabled.
 */
2478 vortex_down(struct net_device *dev)
2480 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2481 long ioaddr = dev->base_addr;
2483 netif_stop_queue (dev);
2485 del_timer_sync(&vp->rx_oom_timer);
2486 del_timer_sync(&vp->timer);
2488 /* Turn off statistics ASAP. We update vp->stats below. */
2489 outw(StatsDisable, ioaddr + EL3_CMD);
2491 /* Disable the receiver and transmitter. */
2492 outw(RxDisable, ioaddr + EL3_CMD);
2493 outw(TxDisable, ioaddr + EL3_CMD);
2495 if (dev->if_port == XCVR_10base2)
2496 /* Turn off thinnet power. Green! */
2497 outw(StopCoax, ioaddr + EL3_CMD);
/* Mask all interrupt sources. */
2499 outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
2501 update_stats(ioaddr, dev);
2502 if (vp->full_bus_master_rx)
2503 outl(0, ioaddr + UpListPtr);
2504 if (vp->full_bus_master_tx)
2505 outl(0, ioaddr + DownListPtr);
2507 if (vp->pdev && vp->enable_wol) {
2508 pci_save_state(vp->pdev, vp->power_state);
/*
 * vortex_close - net_device close entry point.
 * Brings the device down (when still present), frees the IRQ, and
 * releases/unmaps every skb left on the bus-master Rx and Tx rings.
 */
2514 vortex_close(struct net_device *dev)
2516 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2517 long ioaddr = dev->base_addr;
2520 if (netif_device_present(dev))
2523 if (vortex_debug > 1) {
2524 printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
2525 dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
2526 printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
2527 " tx_queued %d Rx pre-checksummed %d.\n",
2528 dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
/* Advisory only: hardware checksumming was available but unused. */
2532 if ( vp->rx_csumhits &&
2533 ((vp->drv_flags & HAS_HWCKSM) == 0) &&
2534 (hw_checksums[vp->card_idx] == -1)) {
2535 printk(KERN_WARNING "%s supports hardware checksums, and we're not using them!\n", dev->name);
2536 printk(KERN_WARNING "Please see http://www.uow.edu.au/~andrewm/zerocopy.html\n");
2540 free_irq(dev->irq, dev);
2542 if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
2543 for (i = 0; i < RX_RING_SIZE; i++)
2544 if (vp->rx_skbuff[i]) {
2545 pci_unmap_single( vp->pdev, le32_to_cpu(vp->rx_ring[i].addr),
2546 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2547 dev_kfree_skb(vp->rx_skbuff[i]);
2548 vp->rx_skbuff[i] = 0;
2551 if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
2552 for (i = 0; i < TX_RING_SIZE; i++) {
2553 if (vp->tx_skbuff[i]) {
2554 struct sk_buff *skb = vp->tx_skbuff[i];
/* <= is intentional: frag[0] maps the skb head, frags 1..n the pages. */
2558 for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
2559 pci_unmap_single(vp->pdev,
2560 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
2561 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
2564 pci_unmap_single(vp->pdev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
2567 vp->tx_skbuff[i] = 0;
/*
 * dump_tx_ring - debug helper: print the Tx ring state (dirty/current
 * indices, hardware DownListPtr, and each descriptor) with the download
 * engine stalled so the snapshot is consistent.  No-op unless
 * vortex_debug > 0.
 */
2576 dump_tx_ring(struct net_device *dev)
2578 if (vortex_debug > 0) {
2579 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2580 long ioaddr = dev->base_addr;
2582 if (vp->full_bus_master_tx) {
2584 int stalled = inl(ioaddr + PktStatus) & 0x04; /* Possibly racy. But it's only debug stuff */
2586 printk(KERN_ERR " Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
2587 vp->full_bus_master_tx,
2588 vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
2589 vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
2590 printk(KERN_ERR " Transmit list %8.8x vs. %p.\n",
2591 inl(ioaddr + DownListPtr),
2592 &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
2593 issue_and_wait(dev, DownStall);
2594 for (i = 0; i < TX_RING_SIZE; i++) {
2595 printk(KERN_ERR " %d: @%p length %8.8x status %8.8x\n", i,
2598 le32_to_cpu(vp->tx_ring[i].frag[0].length),
2600 le32_to_cpu(vp->tx_ring[i].length),
2602 le32_to_cpu(vp->tx_ring[i].status));
2605 outw(DownUnstall, ioaddr + EL3_CMD);
/*
 * vortex_get_stats - return the accumulated statistics, refreshing them
 * from the hardware counters (under vp->lock) only while the device is
 * present; the missing tail presumably returns &vp->stats.
 */
2610 static struct net_device_stats *vortex_get_stats(struct net_device *dev)
2612 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2613 unsigned long flags;
2615 if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */
2616 spin_lock_irqsave (&vp->lock, flags);
2617 update_stats(dev->base_addr, dev);
2618 spin_unlock_irqrestore (&vp->lock, flags);
2623 /* Update statistics.
2624 Unlike with the EL3 we need not worry about interrupts changing
2625 the window setting from underneath us, but we must still guard
2626 against a race condition with a StatsUpdate interrupt updating the
2627 table. This is done by checking that the ASM (!) code generated uses
2628 atomic updates with '+='.
/*
 * update_stats - fold the chip's clear-on-read statistics registers
 * (window 6, plus upper-byte extensions) into vp->stats, then restore
 * the previously selected register window.
 */
2630 static void update_stats(long ioaddr, struct net_device *dev)
2632 struct vortex_private *vp = (struct vortex_private *)dev->priv;
/* Reading the command/status word also yields the current window in
 * the top bits; it is restored at the end. */
2633 int old_window = inw(ioaddr + EL3_CMD);
2635 if (old_window == 0xffff) /* Chip suspended or ejected. */
2637 /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
2638 /* Switch to the stats window, and read everything. */
2640 vp->stats.tx_carrier_errors += inb(ioaddr + 0);
2641 vp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
2642 /* Multiple collisions. */ inb(ioaddr + 2);
2643 vp->stats.collisions += inb(ioaddr + 3);
2644 vp->stats.tx_window_errors += inb(ioaddr + 4);
2645 vp->stats.rx_fifo_errors += inb(ioaddr + 5);
2646 vp->stats.tx_packets += inb(ioaddr + 6);
/* Register 9 bits 4-5 extend the Tx packet count. */
2647 vp->stats.tx_packets += (inb(ioaddr + 9)&0x30) << 4;
2648 /* Rx packets */ inb(ioaddr + 7); /* Must read to clear */
2649 /* Tx deferrals */ inb(ioaddr + 8);
2650 /* Don't bother with register 9, an extension of registers 6&7.
2651 If we do use the 6&7 values the atomic update assumption above
2653 vp->stats.rx_bytes += inw(ioaddr + 10);
2654 vp->stats.tx_bytes += inw(ioaddr + 12);
2655 /* New: On the Vortex we must also clear the BadSSD counter. */
/* Upper nibbles of this byte extend the Rx/Tx byte counters. */
2660 u8 up = inb(ioaddr + 13);
2661 vp->stats.rx_bytes += (up & 0x0f) << 16;
2662 vp->stats.tx_bytes += (up & 0xf0) << 12;
2665 EL3WINDOW(old_window >> 13);
/*
 * netdev_ethtool_ioctl - handle ETHTOOL_GDRVINFO: copy the ethtool
 * command in from userspace and return driver name/version/bus info.
 */
2670 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
2672 struct vortex_private *vp = dev->priv;
/* NOTE(review): "ðcmd" below looks like a mis-encoded "&ethcmd"
 * (HTML-entity corruption of the extracted text) — restore in the
 * canonical source before building. */
2675 if (copy_from_user(ðcmd, useraddr, sizeof(ethcmd)))
2679 case ETHTOOL_GDRVINFO: {
2680 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
2681 strcpy(info.driver, DRV_NAME);
2682 strcpy(info.version, DRV_VERSION);
2684 strcpy(info.bus_info, vp->pdev->slot_name);
2686 sprintf(info.bus_info, "EISA 0x%lx %d",
2687 dev->base_addr, dev->irq);
2688 if (copy_to_user(useraddr, &info, sizeof(info)))
/*
 * vortex_ioctl - device ioctl entry point: dispatches ethtool requests
 * and the MII PHY read/write ioctls (with their pre-2.5 SIOCDEVPRIVATE
 * aliases); writes require CAP_NET_ADMIN.
 */
2698 static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2700 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2701 long ioaddr = dev->base_addr;
2702 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
2703 int phy = vp->phys[0] & 0x1f;
2708 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
2710 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2711 case SIOCDEVPRIVATE: /* for binary compat, remove in 2.5 */
2714 case SIOCGMIIREG: /* Read MII PHY register. */
2715 case SIOCDEVPRIVATE+1: /* for binary compat, remove in 2.5 */
2717 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2721 case SIOCSMIIREG: /* Write MII PHY register. */
2722 case SIOCDEVPRIVATE+2: /* for binary compat, remove in 2.5 */
2723 if (!capable(CAP_NET_ADMIN)) {
2727 mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
2732 retval = -EOPNOTSUPP;
2739 /* Pre-Cyclone chips have no documented multicast filter, so the only
2740 multicast setting is to receive all multicast frames. At least
2741 the chip has a very clean way to set the mode, unlike many others. */
/*
 * set_rx_mode - program the Rx filter: promiscuous, all-multicast
 * (the pre-Cyclone hardware has no per-address multicast filter),
 * or station + broadcast only.
 */
2742 static void set_rx_mode(struct net_device *dev)
2744 long ioaddr = dev->base_addr;
2747 if (dev->flags & IFF_PROMISC) {
2748 if (vortex_debug > 0)
2749 printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", dev->name);
2750 new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
2751 } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
2752 new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
2754 new_mode = SetRxFilter | RxStation | RxBroadcast;
2756 outw(new_mode, ioaddr + EL3_CMD);
2759 /* MII transceiver control section.
2760 Read and write the MII registers using software-generated serial
2761 MDIO protocol. See the MII specifications or DP83840A data sheet
2764 /* The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
2765 met by back-to-back PCI I/O cycles, but we insert a delay to avoid
2766 "overclocking" issues. */
/* A dummy read of the management register serves as the bit-bang delay. */
2767 #define mdio_delay() inl(mdio_addr)
/* Bit assignments within the Wn4_PhysicalMgmt register. */
2769 #define MDIO_SHIFT_CLK 0x01
2770 #define MDIO_DIR_WRITE 0x04
2771 #define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
2772 #define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
2773 #define MDIO_DATA_READ 0x02
2774 #define MDIO_ENB_IN 0x00
2776 /* Generate the preamble required for initial synchronization and
2777 a few older transceivers. */
/*
 * mdio_sync - clock out 'bits' logic ones on the management interface
 * to establish MDIO preamble/synchronization.
 */
2778 static void mdio_sync(long ioaddr, int bits)
2780 long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
2782 /* Establish sync by sending at least 32 logic ones. */
2783 while (-- bits >= 0) {
2784 outw(MDIO_DATA_WRITE1, mdio_addr);
2786 outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
/*
 * mdio_read - bit-bang a 14-bit MDIO read command and shift in the
 * 16-bit register value.  Returns 0xffff when the transceiver did not
 * drive the turnaround bit (bit 0x20000 of the raw shift register).
 * Serialized by vp->mdio_lock.
 */
2791 static int mdio_read(struct net_device *dev, int phy_id, int location)
2793 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2795 long ioaddr = dev->base_addr;
/* Start + read opcode + PHY address + register number. */
2796 int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
2797 unsigned int retval = 0;
2798 long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
2800 spin_lock_bh(&vp->mdio_lock);
2802 if (mii_preamble_required)
2803 mdio_sync(ioaddr, 32);
2805 /* Shift the read command bits out. */
2806 for (i = 14; i >= 0; i--) {
2807 int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
2808 outw(dataval, mdio_addr);
2810 outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
2813 /* Read the two transition, 16 data, and wire-idle bits. */
2814 for (i = 19; i > 0; i--) {
2815 outw(MDIO_ENB_IN, mdio_addr);
2817 retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
2818 outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
2821 spin_unlock_bh(&vp->mdio_lock);
2822 return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
/*
 * mdio_write - bit-bang a full 32-bit MDIO write frame (start, write
 * opcode, PHY address, register, turnaround, value) and then leave the
 * bus idle for two clocks.  Serialized by vp->mdio_lock.
 */
2825 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
2827 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2828 long ioaddr = dev->base_addr;
2829 int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
2830 long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
2833 spin_lock_bh(&vp->mdio_lock);
2835 if (mii_preamble_required)
2836 mdio_sync(ioaddr, 32);
2838 /* Shift the command bits out. */
2839 for (i = 31; i >= 0; i--) {
2840 int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
2841 outw(dataval, mdio_addr);
2843 outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
2846 /* Leave the interface idle. */
2847 for (i = 1; i >= 0; i--) {
2848 outw(MDIO_ENB_IN, mdio_addr);
2850 outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
2853 spin_unlock_bh(&vp->mdio_lock);
2857 /* ACPI: Advanced Configuration and Power Interface. */
2858 /* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
/*
 * acpi_set_WOL - arm magic-packet wake-up (value 2 in the power-control
 * register), open the Rx filter to WOL frames, and drop the board into
 * PCI power state D3.
 */
2859 static void acpi_set_WOL(struct net_device *dev)
2861 struct vortex_private *vp = (struct vortex_private *)dev->priv;
2862 long ioaddr = dev->base_addr;
2864 /* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
2866 outw(2, ioaddr + 0x0c);
2867 /* The RxFilter must accept the WOL frames. */
2868 outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
2869 outw(RxEnable, ioaddr + EL3_CMD);
2871 /* Change the power state to D3; RxEnable doesn't take effect. */
2872 pci_enable_wake(vp->pdev, 0, 1);
2873 pci_set_power_state(vp->pdev, 3);
/*
 * vortex_remove_one - PCI hot-unplug/removal: unregister the netdev,
 * reset the chip, restore power state for WOL boards, free the DMA
 * descriptor rings and the I/O region.
 */
2877 static void __devexit vortex_remove_one (struct pci_dev *pdev)
2879 struct net_device *dev = pci_get_drvdata(pdev);
2880 struct vortex_private *vp;
2883 printk("vortex_remove_one called for EISA device!\n");
2889 /* AKPM: FIXME: we should have
2890 * if (vp->cb_fn_base) iounmap(vp->cb_fn_base);
2893 unregister_netdev(dev);
2894 /* Should really use issue_and_wait() here */
2895 outw(TotalReset|0x14, dev->base_addr + EL3_CMD);
2897 if (vp->pdev && vp->enable_wol) {
2898 pci_set_power_state(vp->pdev, 0); /* Go active */
2899 if (vp->pm_state_valid)
2900 pci_restore_state(vp->pdev, vp->power_state);
/* Rings were allocated as one consistent block; free them together. */
2903 pci_free_consistent(pdev,
2904 sizeof(struct boom_rx_desc) * RX_RING_SIZE
2905 + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
2908 if (vp->must_free_region)
2909 release_region(dev->base_addr, vp->io_size);
/* PCI driver registration table (2.4-era GNU designated-initializer
 * syntax); suspend/resume entries are conditionally compiled. */
2914 static struct pci_driver vortex_driver = {
2916 probe: vortex_init_one,
2917 remove: __devexit_p(vortex_remove_one),
2918 id_table: vortex_pci_tbl,
2920 suspend: vortex_suspend,
2921 resume: vortex_resume,
/* Set at init time; consulted by vortex_cleanup() to decide which
 * bus-specific teardown paths to run. */
2926 static int vortex_have_pci;
2927 static int vortex_have_eisa;
/*
 * vortex_init - module entry point: probe PCI and EISA buses; succeed
 * if either found at least one device, else return -ENODEV.
 */
2930 static int __init vortex_init (void)
2932 int pci_rc, eisa_rc;
2934 pci_rc = pci_module_init(&vortex_driver);
2935 eisa_rc = vortex_eisa_init();
2938 vortex_have_pci = 1;
2940 vortex_have_eisa = 1;
2942 return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV;
/*
 * vortex_eisa_cleanup - walk the EISA device list (root_vortex_eisa_dev
 * / vp->next_module chain), unregistering, resetting, and releasing
 * each board's I/O region.
 */
2946 static void __exit vortex_eisa_cleanup (void)
2948 struct net_device *dev, *tmp;
2949 struct vortex_private *vp;
2952 dev = root_vortex_eisa_dev;
2956 ioaddr = dev->base_addr;
2958 unregister_netdev (dev);
2959 outw (TotalReset, ioaddr + EL3_CMD);
2960 release_region (ioaddr, VORTEX_TOTAL_SIZE);
2963 dev = vp->next_module;
/*
 * vortex_cleanup - module exit: tear down whichever bus attachments
 * vortex_init() brought up.
 */
2970 static void __exit vortex_cleanup (void)
2972 if (vortex_have_pci)
2973 pci_unregister_driver (&vortex_driver);
2974 if (vortex_have_eisa)
2975 vortex_eisa_cleanup ();
/* Register the module entry/exit points with the kernel. */
2979 module_init(vortex_init);
2980 module_exit(vortex_cleanup);