1 /* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
3 Written 1998-2000 by Donald Becker.
5 Current maintainer is Ion Badulescu <ionut@cs.columbia.edu>. Please
6 send all bug reports to me, and not to Donald Becker, as this code
7 has been modified quite a bit from Donald's original version.
9 This software may be used and distributed according to the terms of
10 the GNU General Public License (GPL), incorporated herein by reference.
11 Drivers based on or derived from this code fall under the GPL and must
12 retain the authorship, copyright and license notice. This file is not
13 a complete program and may only be used when the entire operating
14 system is licensed under the GPL.
16 The author may be reached as becker@scyld.com, or C/O
17 Scyld Computing Corporation
18 410 Severn Ave., Suite 210
21 Support and updates available at
22 http://www.scyld.com/network/starfire.html
24 -----------------------------------------------------------
26 Linux kernel-specific changes:
29 - Use PCI driver interface
34 - Merge Becker version 0.15
36 LK1.1.3 (Andrew Morton)
40 - Merge Becker version 1.03
42 LK1.2.1 (Ion Badulescu <ionut@cs.columbia.edu>)
43 - Support hardware Rx/Tx checksumming
44 - Use the GFP firmware taken from Adaptec's Netware driver
46 LK1.2.2 (Ion Badulescu)
49 LK1.2.3 (Ion Badulescu)
50 - Fix the flaky mdio interface
51 - More compat clean-ups
53 LK1.2.4 (Ion Badulescu)
54 - More 2.2.x initialization fixes
56 LK1.2.5 (Ion Badulescu)
57 - Several fixes from Manfred Spraul
59 LK1.2.6 (Ion Badulescu)
60 - Fixed ifup/ifdown/ifup problem in 2.4.x
62 LK1.2.7 (Ion Badulescu)
64 - Made more functions static and __init
66 LK1.2.8 (Ion Badulescu)
67 - Quell bogus error messages, inform about the Tx threshold
68 - Removed #ifdef CONFIG_PCI, this driver is PCI only
70 LK1.2.9 (Ion Badulescu)
71 - Merged Jeff Garzik's changes from 2.4.4-pre5
72 - Added 2.2.x compatibility stuff required by the above changes
74 LK1.2.9a (Ion Badulescu)
75 - More updates from Jeff Garzik
77 LK1.3.0 (Ion Badulescu)
78 - Merged zerocopy support
80 LK1.3.1 (Ion Badulescu)
81 - Added ethtool support
82 - Added GPIO (media change) interrupt support
84 LK1.3.2 (Ion Badulescu)
85 - Fixed 2.2.x compatibility issues introduced in 1.3.1
86 - Fixed ethtool ioctl returning uninitialized memory
88 LK1.3.3 (Ion Badulescu)
89 - Initialize the TxMode register properly
90 - Don't dereference dev->priv after freeing it
92 LK1.3.4 (Ion Badulescu)
93 - Fixed initialization timing problems
94 - Fixed interrupt mask definitions
97 - ethtool NWAY_RST, GLINK, [GS]MSGLVL support
100 - Sparc64 support and fixes (Ion Badulescu)
101 - Better stats and error handling (Ion Badulescu)
102 - Use new pci_set_mwi() PCI API function (jgarzik)
105 - implement tx_timeout() properly
109 #define DRV_NAME "starfire"
110 #define DRV_VERSION "1.03+LK1.3.6"
111 #define DRV_RELDATE "March 7, 2002"
113 #include <linux/version.h>
114 #include <linux/module.h>
115 #include <linux/kernel.h>
116 #include <linux/pci.h>
117 #include <linux/netdevice.h>
118 #include <linux/etherdevice.h>
119 #include <linux/init.h>
120 #include <linux/delay.h>
121 #include <linux/crc32.h>
122 #include <asm/processor.h> /* Processor type for cache alignment. */
123 #include <asm/uaccess.h>
127 * Adaptec's license for their Novell drivers (which is where I got the
128 * firmware files) does not allow one to redistribute them. Thus, we can't
129 * include the firmware with this driver.
131 * However, should a legal-to-use firmware become available,
132 * the driver developer would need only to obtain the firmware in the
133 * form of a C header file.
134 * Once that's done, the #undef below must be changed into a #define
135 * for this driver to really use the firmware. Note that Rx/Tx
136 * hardware TCP checksumming is not possible without the firmware.
138 * WANTED: legal firmware to include with this GPL'd driver.
142 * The current frame processor firmware fails to checksum a fragment
143 * of length 1. If and when this is fixed, the #define below can be removed.
145 #define HAS_BROKEN_FIRMWARE
147 * Define this if using the driver with the zero-copy patch
149 #if defined(HAS_FIRMWARE) && defined(MAX_SKB_FRAGS)
154 #include "starfire_firmware.h"
155 #endif /* HAS_FIRMWARE */
157 /* The user-configurable values.
158 These may be modified when a driver module is loaded.*/
160 /* Used for tuning interrupt latency vs. overhead. */
161 static int interrupt_mitigation;
163 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
164 static int max_interrupt_work = 20;
166 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
167 The Starfire has a 512 element hash table based on the Ethernet CRC. */
168 static int multicast_filter_limit = 512;
170 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
172 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
173 * Setting to > 1518 effectively disables this feature.
176 * The ia64 doesn't allow for unaligned loads even of integers being
177 * misaligned on a 2 byte boundary. Thus always force copying of
178 * packets as the starfire doesn't allow for misaligned DMAs ;-(
181 * The Alpha and the Sparc don't allow unaligned loads, either. -Ion
183 #if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
184 static int rx_copybreak = PKT_BUF_SZ;
186 static int rx_copybreak /* = 0 */;
189 /* Used to pass the media type, etc.
190 Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
191 The media type is usually passed in 'options[]'.
193 #define MAX_UNITS 8 /* More are supported, limit only on options */
194 static int options[MAX_UNITS] = {0, };
195 static int full_duplex[MAX_UNITS] = {0, };
197 /* Operational parameters that are set at compile time. */
199 /* The "native" ring sizes are either 256 or 2048.
200 However in some modes a descriptor may be marked to wrap the ring earlier.
201 The driver allocates a single page for each descriptor ring, constraining
202 the maximum size in an architecture-dependent way.
204 #define RX_RING_SIZE 256
205 #define TX_RING_SIZE 32
206 /* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
207 #define DONE_Q_SIZE 1024
209 /* Operational parameters that usually are not changed. */
210 /* Time in jiffies before concluding the transmitter is hung. */
211 #define TX_TIMEOUT (2 * HZ)
214 #if MAX_SKB_FRAGS <= 6
215 #define MAX_STARFIRE_FRAGS 6
216 #else /* MAX_STARFIRE_FRAGS > 6 */
217 #warning This driver will not work with more than 6 skb fragments.
218 #warning Turning off zerocopy support.
220 #endif /* MAX_STARFIRE_FRAGS > 6 */
221 #endif /* ZEROCOPY */
224 #define skb_first_frag_len(skb) skb_headlen(skb)
225 #else /* not ZEROCOPY */
226 #define skb_first_frag_len(skb) (skb->len)
227 #endif /* not ZEROCOPY */
229 /* 2.2.x compatibility code */
230 #if LINUX_VERSION_CODE < 0x20300
232 #include "starfire-kcomp22.h"
234 #else /* LINUX_VERSION_CODE > 0x20300 */
236 #include <linux/ethtool.h>
237 #include <linux/mii.h>
239 #define COMPAT_MOD_INC_USE_COUNT
240 #define COMPAT_MOD_DEC_USE_COUNT
242 #define init_tx_timer(dev, func, timeout) \
243 dev->tx_timeout = func; \
244 dev->watchdog_timeo = timeout;
245 #define kick_tx_timer(dev, func, timeout)
247 #define netif_start_if(dev)
248 #define netif_stop_if(dev)
250 #define PCI_SLOT_NAME(pci_dev) (pci_dev)->slot_name
252 #endif /* LINUX_VERSION_CODE > 0x20300 */
253 /* end of compatibility code */
256 /* These identify the driver base version and may not be removed. */
257 static char version[] __devinitdata =
258 KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
259 KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";
261 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
262 MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
263 MODULE_LICENSE("GPL");
265 MODULE_PARM(max_interrupt_work, "i");
266 MODULE_PARM(mtu, "i");
267 MODULE_PARM(debug, "i");
268 MODULE_PARM(rx_copybreak, "i");
269 MODULE_PARM(interrupt_mitigation, "i");
270 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
271 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
272 MODULE_PARM_DESC(max_interrupt_work, "Starfire maximum events handled per interrupt");
273 MODULE_PARM_DESC(mtu, "Starfire MTU (all boards)");
274 MODULE_PARM_DESC(debug, "Starfire debug level (0-6)");
275 MODULE_PARM_DESC(rx_copybreak, "Starfire copy breakpoint for copy-only-tiny-frames");
276 MODULE_PARM_DESC(options, "Starfire: Bits 0-3: media type, bit 17: full duplex");
277 MODULE_PARM_DESC(full_duplex, "Starfire full duplex setting(s) (1)");
282 I. Board Compatibility
284 This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.
286 II. Board-specific settings
288 III. Driver operation
292 The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
293 ring sizes are set fixed by the hardware, but may optionally be wrapped
294 earlier by the END bit in the descriptor.
295 This driver uses that hardware queue size for the Rx ring, where a large
296 number of entries has no ill effect beyond increasing the potential backlog.
297 The Tx ring is wrapped with the END bit, since a large hardware Tx queue
298 disables the queue layer priority ordering and we have no mechanism to
299 utilize the hardware two-level priority queue. When modifying the
300 RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
303 IIIb/c. Transmit/Receive Structure
305 See the Adaptec manual for the many possible structures, and options for
306 each structure. There are far too many to document here.
308 For transmit this driver uses type 0/1 transmit descriptors (depending
309 on the presence of the zerocopy infrastructure), and relies on automatic
310 minimum-length padding. It does not use the completion queue
311 consumer index, but instead checks for non-zero status entries.
313 For receive this driver uses type 0 receive descriptors. The driver
314 allocates full frame size skbuffs for the Rx ring buffers, so all frames
315 should fit in a single descriptor. The driver does not use the completion
316 queue consumer index, but instead checks for non-zero status entries.
318 When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
319 is allocated and the frame is copied to the new skbuff. When the incoming
320 frame is larger, the skbuff is passed directly up the protocol stack.
321 Buffers consumed this way are replaced by newly allocated skbuffs in a later
324 A notable aspect of operation is that unaligned buffers are not permitted by
325 the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
326 isn't longword aligned, which may cause problems on some machines,
327 e.g. Alphas and IA64. For these architectures, the driver is forced to copy
328 the frame into a new skbuff unconditionally. Copied frames are put into the
329 skbuff at an offset of "+2", thus 16-byte aligning the IP header.
331 IIId. Synchronization
333 The driver runs as two independent, single-threaded flows of control. One
334 is the send-packet routine, which enforces single-threaded use by the
335 dev->tbusy flag. The other thread is the interrupt handler, which is single
336 threaded by the hardware and interrupt handling software.
338 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
339 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
340 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
341 the 'lp->tx_full' flag.
343 The interrupt handler has exclusive control over the Rx ring and records stats
344 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
345 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
346 clears both the tx_full and tbusy flags.
352 The Adaptec Starfire manuals, available only from Adaptec.
353 http://www.scyld.com/expert/100mbps.html
354 http://www.scyld.com/expert/NWay.html
362 enum chip_capability_flags {CanHaveMII=1, };
363 #define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR0)
366 #define ADDR_64BITS 1 /* This chip uses 64 bit addresses. */
369 #define HAS_IP_COPYSUM 1
375 static struct pci_device_id starfire_pci_tbl[] __devinitdata = {
376 { 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
379 MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);
381 /* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
382 static struct chip_info {
385 } netdrv_tbl[] __devinitdata = {
386 { "Adaptec Starfire 6915", CanHaveMII },
390 /* Offsets to the device registers.
391 Unlike software-only systems, device drivers interact with complex hardware.
392 It's not useful to define symbolic names for every register bit in the
393 device. The name can only partially document the semantics and make
394 the driver longer and more difficult to read.
395 In general, only the important configuration values or bits changed
396 multiple times should be defined symbolically.
398 enum register_offsets {
399 PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
400 IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
401 MIICtrl=0x52000, StationAddr=0x50120, EEPROMCtrl=0x51000,
402 GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
403 TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
404 TxRingHiAddr=0x5009C, /* 64 bit address extension. */
405 TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
407 CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
408 RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
409 CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
410 RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
411 RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
412 TxMode=0x55000, PerfFilterTable=0x56000, HashTable=0x56100,
413 TxGfpMem=0x58000, RxGfpMem=0x5a000,
416 /* Bits in the interrupt status/mask registers. */
417 enum intr_status_bits {
418 IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
419 IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
420 IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
421 IntrTxComplQLow=0x200000, IntrPCI=0x100000,
422 IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
423 IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
424 IntrNormalSummary=0x8000, IntrTxDone=0x4000,
425 IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
426 IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
427 IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
428 IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
429 IntrNoTxCsum=0x20, IntrTxBadID=0x10,
430 IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
431 IntrTxGfp=0x02, IntrPCIPad=0x01,
433 IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
434 IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
435 IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
438 /* Bits in the RxFilterMode register. */
440 AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
441 AcceptMulticast=0x10, AcceptMyPhys=0xE040,
444 /* Bits in the TxDescCtrl register. */
446 TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
447 TxDescSpace128=0x30, TxDescSpace256=0x40,
448 TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
449 TxDescType3=0x03, TxDescType4=0x04,
450 TxNoDMACompletion=0x08, TxDescQ64bit=0x80,
451 TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
452 TxDMABurstSizeShift=8,
455 /* Bits in the RxDescQCtrl register. */
457 RxBufferLenShift=16, RxMinDescrThreshShift=0,
458 RxPrefetchMode=0x8000, Rx2048QEntries=0x4000,
459 RxVariableQ=0x2000, RxDesc64bit=0x1000,
460 RxDescQAddr64bit=0x0100,
461 RxDescSpace4=0x000, RxDescSpace8=0x100,
462 RxDescSpace16=0x200, RxDescSpace32=0x300,
463 RxDescSpace64=0x400, RxDescSpace128=0x500,
467 /* Bits in the RxCompletionAddr register */
469 RxComplQAddr64bit=0x80, TxComplProducerWrEn=0x40,
470 RxComplType0=0x00, RxComplType1=0x10,
471 RxComplType2=0x20, RxComplType3=0x30,
472 RxComplThreshShift=0,
475 /* The Rx and Tx buffer descriptors. */
476 struct starfire_rx_desc {
477 u32 rxaddr; /* Optionally 64 bits. */
480 RxDescValid=1, RxDescEndRing=2,
483 /* Completion queue entry.
484 You must update the page allocation, init_ring and the shift count in rx()
485 if using a larger format. */
487 #define csum_rx_status
488 #endif /* HAS_FIRMWARE */
489 struct rx_done_desc {
490 u32 status; /* Low 16 bits is length. */
491 #ifdef csum_rx_status
492 u32 status2; /* Low 16 bits is csum */
493 #endif /* csum_rx_status */
494 #ifdef full_rx_status
497 u16 csum; /* partial checksum */
499 #endif /* full_rx_status */
502 RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
506 /* Type 0 Tx descriptor. */
507 /* If more fragments are needed, don't forget to change the
508 descriptor spacing as well! */
509 struct starfire_tx_desc {
518 } frag[MAX_STARFIRE_FRAGS];
520 #else /* not ZEROCOPY */
521 /* Type 1 Tx descriptor. */
522 struct starfire_tx_desc {
523 u32 status; /* Upper bits are status, lower 16 length. */
526 #endif /* not ZEROCOPY */
529 TxCRCEn=0x01000000, TxDescIntr=0x08000000,
530 TxRingWrap=0x04000000, TxCalTCP=0x02000000,
532 struct tx_done_report {
533 u32 status; /* timestamp, index. */
535 u32 intrstatus; /* interrupt status */
539 struct rx_ring_info {
543 struct tx_ring_info {
545 dma_addr_t first_mapping;
547 dma_addr_t frag_mapping[MAX_STARFIRE_FRAGS];
548 #endif /* ZEROCOPY */
/* Per-device driver state, hung off dev->priv. Holds the DMA descriptor
 * rings, their bus addresses, the skbuff bookkeeping arrays, and the
 * MII/media state used by the open/duplex/ioctl paths. */
552 struct netdev_private {
553 /* Descriptor rings first for alignment. */
554 struct starfire_rx_desc *rx_ring;
555 struct starfire_tx_desc *tx_ring;
556 dma_addr_t rx_ring_dma;
557 dma_addr_t tx_ring_dma;
558 /* The addresses of rx/tx-in-place skbuffs. */
559 struct rx_ring_info rx_info[RX_RING_SIZE];
560 struct tx_ring_info tx_info[TX_RING_SIZE];
561 /* Pointers to completion queues (full pages). */
562 struct rx_done_desc *rx_done_q;
563 dma_addr_t rx_done_q_dma;
564 unsigned int rx_done;
565 struct tx_done_report *tx_done_q;
566 dma_addr_t tx_done_q_dma;
567 unsigned int tx_done;
568 struct net_device_stats stats;
569 struct pci_dev *pci_dev;
570 /* Frequently used values: keep some adjacent for cache effect. */
572 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
573 unsigned int cur_tx, dirty_tx;
574 unsigned int rx_buf_sz; /* Based on MTU+slack. */
575 unsigned int tx_full:1, /* The Tx queue is full. */
576 /* These values keep track of the transceiver/media in use. */
577 speed100:1; /* Set if speed == 100MBit. */
578 unsigned int intr_mitigation;
581 /* MII transceiver section. */
582 struct mii_if_info mii_if; /* MII lib hooks/info */
583 int phy_cnt; /* Number of MII PHYs found (entries used in phys[]). */
584 unsigned char phys[PHY_CNT]; /* MII device addresses. */
588 static int mdio_read(struct net_device *dev, int phy_id, int location);
589 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
590 static int netdev_open(struct net_device *dev);
591 static void check_duplex(struct net_device *dev);
592 static void tx_timeout(struct net_device *dev);
593 static void init_ring(struct net_device *dev);
594 static int start_tx(struct sk_buff *skb, struct net_device *dev);
595 static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
596 static void netdev_error(struct net_device *dev, int intr_status);
597 static int netdev_rx(struct net_device *dev);
598 static void netdev_error(struct net_device *dev, int intr_status);
599 static void set_rx_mode(struct net_device *dev);
600 static struct net_device_stats *get_stats(struct net_device *dev);
601 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
602 static int netdev_close(struct net_device *dev);
603 static void netdev_media_change(struct net_device *dev);
/* PCI probe callback: one-time setup for a newly found 6915 board.
 * Maps the register window, reads the MAC address from the serial EEPROM,
 * resets the chip, fills in the net_device method pointers, registers the
 * interface and scans for MII PHYs. Returns 0 on success; error paths fall
 * through the err_out_* labels at the bottom in reverse acquisition order. */
607 static int __devinit starfire_init_one(struct pci_dev *pdev,
608 const struct pci_device_id *ent)
610 struct netdev_private *np;
611 int i, irq, option, chip_idx = ent->driver_data;
612 struct net_device *dev;
/* card_idx counts probed boards across calls; indexes options[]/full_duplex[]. */
613 static int card_idx = -1;
615 int drv_flags, io_size;
617 #ifndef HAVE_PCI_SET_MWI
622 /* when built into the kernel, we only print version if device is found */
624 static int printed_version;
625 if (!printed_version++)
631 if (pci_enable_device (pdev))
/* BAR 0 must be a memory resource; this driver is MMIO-only. */
634 ioaddr = pci_resource_start(pdev, 0);
635 io_size = pci_resource_len(pdev, 0);
636 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
637 printk (KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
641 dev = alloc_etherdev(sizeof(*np));
643 printk (KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
646 SET_MODULE_OWNER(dev);
650 if (pci_request_regions (pdev, dev->name)) {
651 printk (KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
652 goto err_out_free_netdev;
655 /* ioremap is broken in Linux-2.2.x/sparc64, so skip it there. */
656 #if !defined(CONFIG_SPARC64) || LINUX_VERSION_CODE > 0x20300
657 ioaddr = (long) ioremap(ioaddr, io_size);
659 printk (KERN_ERR DRV_NAME " %d: cannot remap 0x%x @ 0x%lx, aborting\n",
660 card_idx, io_size, ioaddr);
661 goto err_out_free_res;
663 #endif /* !CONFIG_SPARC64 || Linux 2.3.0+ */
665 pci_set_master(pdev);
667 #ifdef HAVE_PCI_SET_MWI
/* No pci_set_mwi() on this kernel: set Memory-Write-Invalidate and the
 * cache line size by hand via PCI config space. */
670 /* enable MWI -- it vastly improves Rx performance on sparc64 */
671 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
672 cmd |= PCI_COMMAND_INVALIDATE;
673 pci_write_config_word(pdev, PCI_COMMAND, cmd);
675 /* set PCI cache size */
676 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
/* Config register is in units of 4-byte words, hence the <<2 / >>2. */
677 if ((cache << 2) != SMP_CACHE_BYTES) {
678 printk(KERN_INFO " PCI cache line size set incorrectly "
679 "(%i bytes) by BIOS/FW, correcting to %i\n",
680 (cache << 2), SMP_CACHE_BYTES);
681 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
682 SMP_CACHE_BYTES >> 2);
687 /* Starfire can do SG and TCP/UDP checksumming */
688 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
689 #endif /* ZEROCOPY */
691 /* Serial EEPROM reads are hidden by the hardware. */
/* MAC address is stored backwards at EEPROMCtrl+15..20. */
692 for (i = 0; i < 6; i++)
693 dev->dev_addr[i] = readb(ioaddr + EEPROMCtrl + 20 - i);
695 #if ! defined(final_version) /* Dump the EEPROM contents during development. */
697 for (i = 0; i < 0x20; i++)
699 (unsigned int)readb(ioaddr + EEPROMCtrl + i),
700 i % 16 != 15 ? " " : "\n");
703 /* Issue soft reset */
704 writel(0x8000, ioaddr + TxMode);
706 writel(0, ioaddr + TxMode);
708 /* Reset the chip to erase previous misconfiguration. */
709 writel(1, ioaddr + PCIDeviceConfig);
/* Bounded poll: bit 0 of PCIDeviceConfig clears when the reset finishes. */
711 while (--boguscnt > 0) {
713 if ((readl(ioaddr + PCIDeviceConfig) & 1) == 0)
717 printk("%s: chipset reset never completed!\n", dev->name);
718 /* wait a little longer */
721 dev->base_addr = ioaddr;
725 spin_lock_init(&np->lock);
726 pci_set_drvdata(pdev, dev);
/* Hook up the generic MII library; the chip exposes one MDIO bus. */
730 np->mii_if.dev = dev;
731 np->mii_if.mdio_read = mdio_read;
732 np->mii_if.mdio_write = mdio_write;
733 np->mii_if.phy_id_mask = 0x1f;
734 np->mii_if.reg_num_mask = 0x1f;
736 drv_flags = netdrv_tbl[chip_idx].drv_flags;
738 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
740 option = dev->mem_start;
742 /* The lower four bits are the media type. */
744 np->mii_if.full_duplex = 1;
746 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
747 np->mii_if.full_duplex = 1;
/* A user-forced duplex setting implies forced media (no autonegotiation). */
749 if (np->mii_if.full_duplex)
750 np->mii_if.force_media = 0;
752 np->mii_if.force_media = 1;
755 /* The chip-specific entries in the device structure. */
756 dev->open = &netdev_open;
757 dev->hard_start_xmit = &start_tx;
758 init_tx_timer(dev, tx_timeout, TX_TIMEOUT);
759 dev->stop = &netdev_close;
760 dev->get_stats = &get_stats;
761 dev->set_multicast_list = &set_rx_mode;
762 dev->do_ioctl = &netdev_ioctl;
767 i = register_netdev(dev);
769 goto err_out_cleardev;
771 printk(KERN_INFO "%s: %s at 0x%lx, ",
772 dev->name, netdrv_tbl[chip_idx].name, ioaddr);
773 for (i = 0; i < 5; i++)
774 printk("%2.2x:", dev->dev_addr[i]);
775 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
/* Probe all 32 possible MDIO addresses; a PHY answers with a nonzero BMSR. */
777 if (drv_flags & CanHaveMII) {
778 int phy, phy_idx = 0;
780 for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
781 mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
784 while (--boguscnt > 0)
785 if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
788 printk("%s: PHY reset never completed!\n", dev->name);
791 mii_status = mdio_read(dev, phy, MII_BMSR);
792 if (mii_status != 0) {
793 np->phys[phy_idx++] = phy;
794 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
795 printk(KERN_INFO "%s: MII PHY found at address %d, status "
796 "0x%4.4x advertising %4.4x.\n",
797 dev->name, phy, mii_status, np->mii_if.advertising);
798 /* there can be only one PHY on-board */
802 np->phy_cnt = phy_idx;
804 np->mii_if.phy_id = np->phys[0];
806 memset(&np->mii_if, 0, sizeof(np->mii_if));
810 printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming enabled.\n",
812 #else /* not ZEROCOPY */
813 printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming disabled.\n",
815 #endif /* not ZEROCOPY */
/* Error unwind: release resources in reverse order of acquisition. */
820 pci_set_drvdata(pdev, NULL);
821 iounmap((void *)ioaddr);
823 pci_release_regions (pdev);
825 unregister_netdev(dev);
831 /* Read the MII Management Data I/O (MDIO) interfaces. */
/* Read one 16-bit MII register from the PHY at @phy_id.
 * Each (phy, register) pair has its own word in the MIICtrl MMIO window;
 * reading it starts the serial MDIO transaction in hardware.
 * Returns the register value, or 0 on timeout / no PHY present. */
832 static int mdio_read(struct net_device *dev, int phy_id, int location)
834 long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
835 int result, boguscnt=1000;
836 /* ??? Should we add a busy-wait here? */
/* Poll until the top two status bits read 10b -- presumably "transaction
 * done, data valid" -- giving up after 1000 reads so a dead PHY cannot
 * hang the caller. */
838 result = readl(mdio_addr);
839 while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
/* All-ones data is what an unconnected MDIO bus returns: treat as no PHY. */
842 if ((result & 0xffff) == 0xffff)
844 return result & 0xffff;
/* Write @value to one 16-bit MII register of the PHY at @phy_id.
 * The write is fire-and-forget: completion is not polled here, since
 * mdio_read()'s busy-wait will synchronize before any subsequent read. */
848 static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
850 long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
851 writel(value, mdio_addr);
852 /* The busy-wait will occur before a read. */
/* dev->open handler: bring the interface up.
 * Grabs the (shared) IRQ, resets the chip, allocates the four DMA queues
 * (Tx/Rx descriptor rings and Tx/Rx completion queues) via the consistent
 * DMA API, programs their bus addresses and the operating registers,
 * optionally downloads the GFP firmware, and finally enables Rx/Tx.
 * Returns 0 on success or a negative errno from request_irq/allocation. */
856 static int netdev_open(struct net_device *dev)
858 struct netdev_private *np = dev->priv;
859 long ioaddr = dev->base_addr;
862 /* Do we ever need to reset the chip??? */
864 COMPAT_MOD_INC_USE_COUNT;
866 retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
868 COMPAT_MOD_DEC_USE_COUNT;
872 /* Disable the Rx and Tx, and reset the chip. */
873 writel(0, ioaddr + GenCtrl);
874 writel(1, ioaddr + PCIDeviceConfig);
876 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
877 dev->name, dev->irq);
878 /* Allocate the various queues, failing gracefully. */
/* Queues persist across down/up cycles: only allocate if still NULL. */
879 if (np->tx_done_q == 0)
880 np->tx_done_q = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->tx_done_q_dma);
881 if (np->rx_done_q == 0)
882 np->rx_done_q = pci_alloc_consistent(np->pci_dev, sizeof(struct rx_done_desc) * DONE_Q_SIZE, &np->rx_done_q_dma);
883 if (np->tx_ring == 0)
884 np->tx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->tx_ring_dma);
885 if (np->rx_ring == 0)
886 np->rx_ring = pci_alloc_consistent(np->pci_dev, PAGE_SIZE, &np->rx_ring_dma);
/* If any allocation failed, free whichever ones succeeded and bail out. */
887 if (np->tx_done_q == 0 || np->rx_done_q == 0
888 || np->rx_ring == 0 || np->tx_ring == 0) {
890 pci_free_consistent(np->pci_dev, PAGE_SIZE,
891 np->tx_done_q, np->tx_done_q_dma);
893 pci_free_consistent(np->pci_dev, sizeof(struct rx_done_desc) * DONE_Q_SIZE,
894 np->rx_done_q, np->rx_done_q_dma);
896 pci_free_consistent(np->pci_dev, PAGE_SIZE,
897 np->tx_ring, np->tx_ring_dma);
899 pci_free_consistent(np->pci_dev, PAGE_SIZE,
900 np->rx_ring, np->rx_ring_dma);
901 COMPAT_MOD_DEC_USE_COUNT;
905 netif_carrier_off(dev);
907 /* Set the size of the Rx buffers. */
908 writel((np->rx_buf_sz << RxBufferLenShift) |
909 (0 << RxMinDescrThreshShift) |
910 RxPrefetchMode | RxVariableQ |
912 ioaddr + RxDescQCtrl);
/* Tx descriptor format depends on zerocopy: type 0 (fragment list) with
 * 64-byte spacing, or type 1 (single buffer) with unlimited spacing. */
915 /* Set Tx descriptor to type 0 and spacing to 64 bytes. */
916 writel((2 << TxHiPriFIFOThreshShift) |
917 (0 << TxPadLenShift) |
918 (4 << TxDMABurstSizeShift) |
919 TxDescSpace64 | TxDescType0,
920 ioaddr + TxDescCtrl);
921 #else /* not ZEROCOPY */
922 /* Set Tx descriptor to type 1 and padding to 0 bytes. */
923 writel((2 << TxHiPriFIFOThreshShift) |
924 (0 << TxPadLenShift) |
925 (4 << TxDMABurstSizeShift) |
926 TxDescSpaceUnlim | TxDescType1,
927 ioaddr + TxDescCtrl);
928 #endif /* not ZEROCOPY */
/* Program the ring base addresses; the Hi registers carry the upper 32
 * bits of a 64-bit bus address and are zeroed on 32-bit platforms. */
930 #if defined(ADDR_64BITS) && defined(__alpha__)
931 /* XXX We really need a 64-bit PCI dma interfaces too... -DaveM */
932 writel(np->rx_ring_dma >> 32, ioaddr + RxDescQHiAddr);
933 writel(np->tx_ring_dma >> 32, ioaddr + TxRingHiAddr);
935 writel(0, ioaddr + RxDescQHiAddr);
936 writel(0, ioaddr + TxRingHiAddr);
937 writel(0, ioaddr + CompletionHiAddr);
939 writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
940 writel(np->tx_ring_dma, ioaddr + TxRingPtr);
942 writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
/* Rx completion entry type must match struct rx_done_desc's layout,
 * which is selected by the full_rx_status / csum_rx_status defines. */
943 #ifdef full_rx_status
944 writel(np->rx_done_q_dma |
946 (0 << RxComplThreshShift),
947 ioaddr + RxCompletionAddr);
948 #else /* not full_rx_status */
949 #ifdef csum_rx_status
950 writel(np->rx_done_q_dma |
952 (0 << RxComplThreshShift),
953 ioaddr + RxCompletionAddr);
954 #else /* not csum_rx_status */
955 writel(np->rx_done_q_dma |
957 (0 << RxComplThreshShift),
958 ioaddr + RxCompletionAddr);
959 #endif /* not csum_rx_status */
960 #endif /* not full_rx_status */
963 printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
965 /* Fill both the unused Tx SA register and the Rx perfect filter. */
966 for (i = 0; i < 6; i++)
967 writeb(dev->dev_addr[i], ioaddr + StationAddr + 5 - i);
/* All 16 perfect-filter slots get our own address (big-endian words). */
968 for (i = 0; i < 16; i++) {
969 u16 *eaddrs = (u16 *)dev->dev_addr;
970 long setup_frm = ioaddr + PerfFilterTable + i * 16;
971 writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
972 writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
973 writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
976 /* Initialize other registers. */
977 /* Configure the PCI bus bursts and FIFO thresholds. */
978 np->tx_mode = 0x0C04; /* modified when link is up. */
979 writel(0x8000 | np->tx_mode, ioaddr + TxMode);
981 writel(np->tx_mode, ioaddr + TxMode);
982 np->tx_threshold = 4;
983 writel(np->tx_threshold, ioaddr + TxThreshold);
/* Module parameter; only the low 5 bits are meaningful to the chip. */
985 interrupt_mitigation &= 0x1f;
986 np->intr_mitigation = interrupt_mitigation;
987 writel(np->intr_mitigation, ioaddr + IntrTimerCtrl);
990 netif_start_queue(dev);
993 printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
996 np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
999 /* Enable GPIO interrupts on link change */
1000 writel(0x0f00ff00, ioaddr + GPIOCtrl);
1002 /* Set the interrupt mask and enable PCI interrupts. */
1003 writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
1004 IntrTxDone | IntrStatsMax | IntrLinkChange |
1005 IntrNormalSummary | IntrAbnormalSummary |
1006 IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
1007 ioaddr + IntrEnable);
1008 writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
1009 ioaddr + PCIDeviceConfig);
1012 /* Load Rx/Tx firmware into the frame processors */
1013 for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)
1014 writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4);
1015 for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
1016 writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4);
1017 /* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
1018 writel(0x003F, ioaddr + GenCtrl);
1019 #else /* not HAS_FIRMWARE */
1020 /* Enable the Rx and Tx units only. */
1021 writel(0x000F, ioaddr + GenCtrl);
1022 #endif /* not HAS_FIRMWARE */
1025 printk(KERN_DEBUG "%s: Done netdev_open().\n",
/* Re-program the first PHY's advertising and control registers to match
 * the driver's current media settings: restart autonegotiation when media
 * is not forced, otherwise force speed/duplex directly in BMCR. */
1032 static void check_duplex(struct net_device *dev)
1034 struct netdev_private *np = dev->priv;
1037 mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
1038 mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
/* BMCR_RESET self-clears when the PHY reset completes; spin until then.
 * NOTE(review): this poll is unbounded -- a dead PHY would hang here. */
1040 while (mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET);
1042 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1044 if (!np->mii_if.force_media) {
1045 reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
/* Forced media: disable autoneg and set speed/duplex bits by hand. */
1047 reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1049 reg0 |= BMCR_SPEED100;
1050 if (np->mii_if.full_duplex)
1051 reg0 |= BMCR_FULLDPLX;
1052 printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1054 np->speed100 ? "100" : "10",
1055 np->mii_if.full_duplex ? "full" : "half");
1057 mdio_write(dev, np->phys[0], MII_BMCR, reg0);
/* Watchdog callback invoked when no Tx completion has been seen for
 * TX_TIMEOUT jiffies. Dumps diagnostic ring state, counts the error, and
 * restarts the queue; the hardware itself is not yet reinitialized here
 * (see the XXX-style comments below). */
1061 static void tx_timeout(struct net_device *dev)
1063 struct netdev_private *np = dev->priv;
1064 long ioaddr = dev->base_addr;
1066 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
1067 " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
/* Debug dump of both descriptor rings as the hardware sees them. */
1072 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
1073 for (i = 0; i < RX_RING_SIZE; i++)
1074 printk(" %8.8x", (unsigned int)le32_to_cpu(np->rx_ring[i].rxaddr));
1075 printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
1076 for (i = 0; i < TX_RING_SIZE; i++)
1077 printk(" %4.4x", le32_to_cpu(np->tx_ring[i].status));
1082 /* Perhaps we should reinitialize the hardware here. */
1083 /* Stop and restart the chip's Tx processes . */
1085 /* Trigger an immediate transmit demand. */
/* Reset the watchdog clock and let the stack queue packets again. */
1087 dev->trans_start = jiffies;
1088 np->stats.tx_errors++;
1089 netif_wake_queue(dev);
1093 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/*
 * init_ring: reset all producer/consumer indices, pre-allocate and
 * DMA-map one skb per Rx descriptor, and zero the Tx ring and both
 * completion queues.  Allocation failure mid-loop is tolerated: the
 * remaining Rx slots are simply left empty for later refill.
 */
1094 static void init_ring(struct net_device *dev)
1096 struct netdev_private *np = dev->priv;
1100 np->cur_rx = np->cur_tx = 0;
1101 np->dirty_rx = np->rx_done = np->dirty_tx = np->tx_done = 0;
/* Buffer size: standard for MTU <= 1500, otherwise MTU plus slack. */
1103 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1105 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1106 for (i = 0; i < RX_RING_SIZE; i++) {
1107 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1108 np->rx_info[i].skb = skb;
1111 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1112 skb->dev = dev; /* Mark as being used by this device. */
1113 /* Grrr, we cannot offset to correctly align the IP header. */
1114 np->rx_ring[i].rxaddr = cpu_to_le32(np->rx_info[i].mapping | RxDescValid);
/* Tell the chip how many descriptors were filled (producer index). */
1116 writew(i - 1, dev->base_addr + RxDescQIdx);
1117 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1119 /* Clear the remainder of the Rx buffer ring. */
1120 for ( ; i < RX_RING_SIZE; i++) {
1121 np->rx_ring[i].rxaddr = 0;
1122 np->rx_info[i].skb = NULL;
1123 np->rx_info[i].mapping = 0;
1125 /* Mark the last entry as wrapping the ring. */
1126 np->rx_ring[i-1].rxaddr |= cpu_to_le32(RxDescEndRing);
1128 /* Clear the completion rings. */
1129 for (i = 0; i < DONE_Q_SIZE; i++) {
1130 np->rx_done_q[i].status = 0;
1131 np->tx_done_q[i].status = 0;
/* Empty the Tx ring: no skbs, no DMA mappings, status cleared. */
1134 for (i = 0; i < TX_RING_SIZE; i++) {
1135 np->tx_info[i].skb = NULL;
1136 np->tx_info[i].first_mapping = 0;
1140 for (j = 0; j < MAX_STARFIRE_FRAGS; j++)
1141 np->tx_info[i].frag_mapping[j] = 0;
1143 #endif /* ZEROCOPY */
1144 np->tx_ring[i].status = 0;
/*
 * start_tx: hard_start_xmit handler.  Maps the skb (and, under ZEROCOPY,
 * each page fragment) for DMA, fills the next Tx descriptor, advances the
 * producer index register, and stops the queue when the ring is nearly
 * full.  Descriptor fields are written before the status word that hands
 * ownership to the chip (see the "write order" caution below).
 */
1150 static int start_tx(struct sk_buff *skb, struct net_device *dev)
1152 struct netdev_private *np = dev->priv;
/* Re-arm the software Tx watchdog for this transmit. */
1158 kick_tx_timer(dev, tx_timeout, TX_TIMEOUT);
1160 /* Caution: the write order is important here, set the field
1161 with the "ownership" bits last. */
1163 /* Calculate the next Tx descriptor entry. */
1164 entry = np->cur_tx % TX_RING_SIZE;
/* Workaround for broken GFP firmware: 1-byte fragments confuse the
 * Tx checksum engine, so fall back to a software checksum. */
1166 #if defined(ZEROCOPY) && defined(HAS_FIRMWARE) && defined(HAS_BROKEN_FIRMWARE)
1168 int has_bad_length = 0;
1170 if (skb_first_frag_len(skb) == 1)
1173 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1174 if (skb_shinfo(skb)->frags[i].size == 1) {
1181 skb_checksum_help(skb);
1183 #endif /* ZEROCOPY && HAS_FIRMWARE && HAS_BROKEN_FIRMWARE */
/* Record the skb and DMA-map its linear (first) fragment. */
1185 np->tx_info[entry].skb = skb;
1186 np->tx_info[entry].first_mapping =
1187 pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1189 np->tx_ring[entry].first_addr = cpu_to_le32(np->tx_info[entry].first_mapping);
1191 np->tx_ring[entry].first_len = cpu_to_le16(skb_first_frag_len(skb));
1192 np->tx_ring[entry].total_len = cpu_to_le16(skb->len);
1193 /* Add "| TxDescIntr" to generate Tx-done interrupts. */
1194 np->tx_ring[entry].status = cpu_to_le32(TxDescID | TxCRCEn);
1195 np->tx_ring[entry].nbufs = cpu_to_le32(skb_shinfo(skb)->nr_frags + 1);
1196 #else /* not ZEROCOPY */
1197 /* Add "| TxDescIntr" to generate Tx-done interrupts. */
1198 np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID | TxCRCEn | 1 << 16);
1199 #endif /* not ZEROCOPY */
/* Last slot: mark the ring wrap and force a completion interrupt. */
1201 if (entry >= TX_RING_SIZE-1) /* Wrap ring */
1202 np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap | TxDescIntr);
/* Ask the GFP firmware to compute the TCP/UDP checksum in hardware. */
1205 if (skb->ip_summed == CHECKSUM_HW) {
1206 np->tx_ring[entry].status |= cpu_to_le32(TxCalTCP);
1207 np->stats.tx_compressed++;
1209 #endif /* ZEROCOPY */
1213 printk(KERN_DEBUG "%s: Tx #%d slot %d status %8.8x nbufs %d len %4.4x/%4.4x.\n",
1214 dev->name, np->cur_tx, entry,
1215 le32_to_cpu(np->tx_ring[entry].status),
1216 le32_to_cpu(np->tx_ring[entry].nbufs),
1217 le32_to_cpu(np->tx_ring[entry].first_len),
1218 le32_to_cpu(np->tx_ring[entry].total_len));
1219 #else /* not ZEROCOPY */
1220 printk(KERN_DEBUG "%s: Tx #%d slot %d status %8.8x.\n",
1221 dev->name, np->cur_tx, entry,
1222 le32_to_cpu(np->tx_ring[entry].status));
1223 #endif /* not ZEROCOPY */
/* ZEROCOPY: map and describe each page fragment of the skb. */
1227 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1228 skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i];
1230 /* we already have the proper value in entry */
1231 np->tx_info[entry].frag_mapping[i] =
1232 pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
1234 np->tx_ring[entry].frag[i].addr = cpu_to_le32(np->tx_info[entry].frag_mapping[i]);
1235 np->tx_ring[entry].frag[i].len = cpu_to_le32(this_frag->size);
1237 printk(KERN_DEBUG "%s: Tx #%d frag %d len %4.4x.\n",
1238 dev->name, np->cur_tx, i,
1239 le32_to_cpu(np->tx_ring[entry].frag[i].len));
1242 #endif /* ZEROCOPY */
1246 if (entry >= TX_RING_SIZE-1) /* Wrap ring */
1250 /* Non-x86: explicitly flush descriptor cache lines here. */
1251 /* Ensure everything is written back above before the transmit is
/* Hand the descriptor to the chip by bumping the producer index.
 * The index is expressed in 8-byte units of the descriptor. */
1255 /* Update the producer index. */
1256 writel(entry * (sizeof(struct starfire_tx_desc) / 8), dev->base_addr + TxProducerIdx);
/* Stop the queue when the ring is (almost) full; the completion
 * handler wakes it again once space is reclaimed. */
1258 if (np->cur_tx - np->dirty_tx >= TX_RING_SIZE - 1) {
1260 netif_stop_queue(dev);
1263 dev->trans_start = jiffies;
1269 /* The interrupt handler does all of the Rx thread work and cleans up
1270 after the Tx thread. */
/*
 * intr_handler: top-half IRQ handler.  Loops reading-and-clearing
 * IntrClear until no interrupt sources remain or max_interrupt_work
 * iterations have been spent.  Dispatches to netdev_rx() for receive
 * work, reclaims completed Tx skbs from the Tx-done completion queue,
 * and forwards link/stat/error events to their handlers.
 */
1271 static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1273 struct net_device *dev = (struct net_device *)dev_instance;
1274 struct netdev_private *np;
/* Bound the amount of work done per invocation. */
1276 int boguscnt = max_interrupt_work;
1280 #ifndef final_version /* Can never occur. */
1282 printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown device.\n", irq);
1287 ioaddr = dev->base_addr;
/* Reading IntrClear both returns and acknowledges pending sources. */
1291 u32 intr_status = readl(ioaddr + IntrClear);
1294 printk(KERN_DEBUG "%s: Interrupt status %4.4x.\n",
1295 dev->name, intr_status);
/* Nothing pending: not our interrupt (or we are done). */
1297 if (intr_status == 0)
1300 if (intr_status & IntrRxDone)
1303 /* Scavenge the skbuff list based on the Tx-done queue.
1304 There are redundant checks here that may be cleaned up
1305 after the driver has proven to be reliable. */
1306 consumer = readl(ioaddr + TxConsumerIdx);
1308 printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
1309 dev->name, consumer);
1311 if (np->tx_done >= 250 || np->tx_done == 0)
1312 printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x, %d is %8.8x.\n",
1313 dev->name, np->tx_done,
1314 le32_to_cpu(np->tx_done_q[np->tx_done].status),
1315 (np->tx_done+1) & (DONE_Q_SIZE-1),
1316 le32_to_cpu(np->tx_done_q[(np->tx_done+1)&(DONE_Q_SIZE-1)].status));
/* Walk the Tx completion queue; a zero status terminates the walk. */
1319 while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
1321 printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n",
1322 dev->name, np->tx_done, tx_status);
/* 0xa... = simple Tx-done event; 0x8... = descriptor-done, carries
 * the descriptor offset in the low bits so the skb can be freed. */
1323 if ((tx_status & 0xe0000000) == 0xa0000000) {
1324 np->stats.tx_packets++;
1325 } else if ((tx_status & 0xe0000000) == 0x80000000) {
1326 struct sk_buff *skb;
1329 #endif /* ZEROCOPY */
1330 u16 entry = tx_status; /* Implicit truncate */
1331 entry /= sizeof(struct starfire_tx_desc);
1333 skb = np->tx_info[entry].skb;
1334 np->tx_info[entry].skb = NULL;
/* Undo the DMA mappings created in start_tx(). */
1335 pci_unmap_single(np->pci_dev,
1336 np->tx_info[entry].first_mapping,
1337 skb_first_frag_len(skb),
1339 np->tx_info[entry].first_mapping = 0;
1342 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1343 pci_unmap_single(np->pci_dev,
1344 np->tx_info[entry].frag_mapping[i],
1345 skb_shinfo(skb)->frags[i].size,
1347 np->tx_info[entry].frag_mapping[i] = 0;
1349 #endif /* ZEROCOPY */
1351 /* Scavenge the descriptor. */
1352 dev_kfree_skb_irq(skb);
/* Consume the completion entry and tell the chip. */
1356 np->tx_done_q[np->tx_done].status = 0;
1357 np->tx_done = (np->tx_done+1) & (DONE_Q_SIZE-1);
1359 writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
/* Ring has drained enough: restart the transmit queue. */
1361 if (np->tx_full && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
1362 /* The ring is no longer full, wake the queue. */
1364 netif_wake_queue(dev);
1367 /* Stats overflow */
1368 if (intr_status & IntrStatsMax) {
1372 /* Media change interrupt. */
1373 if (intr_status & IntrLinkChange)
1374 netdev_media_change(dev);
1376 /* Abnormal error summary/uncommon events handlers. */
1377 if (intr_status & IntrAbnormalSummary)
1378 netdev_error(dev, intr_status);
/* Give up after too many iterations to avoid IRQ livelock. */
1380 if (--boguscnt < 0) {
1381 printk(KERN_WARNING "%s: Too much work at interrupt, "
1382 "status=0x%4.4x.\n",
1383 dev->name, intr_status);
1389 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1390 dev->name, (int)readl(ioaddr + IntrStatus));
1392 #ifndef final_version
1393 /* Code that should never be run! Remove after testing.. */
/* Emergency brake: if interrupts keep arriving for a downed interface,
 * stop after 10 occurrences. */
1395 static int stopit = 10;
1396 if (!netif_running(dev) && --stopit < 0) {
1397 printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
1406 /* This routine is logically part of the interrupt handler, but separated
1407 for clarity and better register allocation. */
/*
 * netdev_rx: drain the Rx completion queue.  For each completed packet:
 * account errors, copy small packets into a fresh skb (rx_copybreak) or
 * hand the mapped buffer straight up, set hardware-checksum state from
 * status2 when firmware provides it, and pass the skb to the stack.
 * Afterwards, refill any Rx descriptors whose skbs were consumed.
 */
1408 static int netdev_rx(struct net_device *dev)
1410 struct netdev_private *np = dev->priv;
/* Limit work to the number of outstanding Rx buffers. */
1411 int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1414 if (np->rx_done_q == 0) {
1415 printk(KERN_ERR "%s: rx_done_q is NULL! rx_done is %d. %p.\n",
1416 dev->name, np->rx_done, np->tx_done_q);
1420 /* If EOP is set on the next entry, it's a new packet. Send it up. */
/* A zero status terminates the completion-queue walk. */
1421 while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
1422 struct sk_buff *skb;
1427 printk(KERN_DEBUG " netdev_rx() status of %d was %8.8x.\n", np->rx_done, desc_status);
1430 if ( ! (desc_status & RxOK)) {
1431 /* There was a error. */
1433 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n", desc_status);
1434 np->stats.rx_errors++;
1435 if (desc_status & RxFIFOErr)
1436 np->stats.rx_fifo_errors++;
/* Low 16 bits = packet length, bits 16..26 = ring entry index. */
1440 pkt_len = desc_status; /* Implicitly Truncate */
1441 entry = (desc_status >> 16) & 0x7ff;
1443 #ifndef final_version
1445 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, bogus_cnt %d.\n", pkt_len, boguscnt);
1447 /* Check if the packet is long enough to accept without copying
1448 to a minimally-sized skbuff. */
1449 if (pkt_len < rx_copybreak
1450 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1452 skb_reserve(skb, 2); /* 16 byte align the IP header */
/* Sync the DMA buffer for CPU access before copying out of it. */
1453 pci_dma_sync_single(np->pci_dev,
1454 np->rx_info[entry].mapping,
1455 pkt_len, PCI_DMA_FROMDEVICE);
1456 #if HAS_IP_COPYSUM /* Call copy + cksum if available. */
1457 eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
1458 skb_put(skb, pkt_len);
1460 memcpy(skb_put(skb, pkt_len), np->rx_info[entry].skb->tail, pkt_len);
/* Large packet: unmap and pass the original buffer up zero-copy;
 * the descriptor slot is refilled later. */
1463 pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1464 skb = np->rx_info[entry].skb;
1465 skb_put(skb, pkt_len);
1466 np->rx_info[entry].skb = NULL;
1467 np->rx_info[entry].mapping = 0;
1469 #ifndef final_version /* Remove after testing. */
1470 /* You will want this info for the initial debug. */
1472 printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
1473 "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
1475 skb->data[0], skb->data[1], skb->data[2], skb->data[3],
1476 skb->data[4], skb->data[5], skb->data[6], skb->data[7],
1477 skb->data[8], skb->data[9], skb->data[10],
1478 skb->data[11], skb->data[12], skb->data[13],
1479 skb->data[14], skb->data[15], skb->data[16],
1482 skb->protocol = eth_type_trans(skb, dev);
/* Firmware Rx checksum results arrive in status2. */
1483 #if defined(full_rx_status) || defined(csum_rx_status)
1484 if (le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0x01000000) {
1485 skb->ip_summed = CHECKSUM_UNNECESSARY;
1486 np->stats.rx_compressed++;
1489 * This feature doesn't seem to be working, at least
1490 * with the two firmware versions I have. If the GFP sees
1491 * a fragment, it either ignores it completely, or reports
1492 * "bad checksum" on it.
1494 * Maybe I missed something -- corrections are welcome.
1495 * Until then, the printk stays. :-) -Ion
1497 else if (le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0x00400000) {
1498 skb->ip_summed = CHECKSUM_HW;
1499 skb->csum = le32_to_cpu(np->rx_done_q[np->rx_done].status2) & 0xffff;
1500 printk(KERN_DEBUG "%s: checksum_hw, status2 = %x\n", dev->name, np->rx_done_q[np->rx_done].status2);
1504 dev->last_rx = jiffies;
1505 np->stats.rx_packets++;
/* Consume the completion entry and tell the chip. */
1509 np->rx_done_q[np->rx_done].status = 0;
1510 np->rx_done = (np->rx_done + 1) & (DONE_Q_SIZE-1);
1512 writew(np->rx_done, dev->base_addr + CompletionQConsumerIdx);
1514 /* Refill the Rx ring buffers. */
1515 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1516 struct sk_buff *skb;
1517 int entry = np->dirty_rx % RX_RING_SIZE;
1518 if (np->rx_info[entry].skb == NULL) {
1519 skb = dev_alloc_skb(np->rx_buf_sz);
1520 np->rx_info[entry].skb = skb;
1522 break; /* Better luck next round. */
1523 np->rx_info[entry].mapping =
1524 pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1525 skb->dev = dev; /* Mark as being used by this device. */
1526 np->rx_ring[entry].rxaddr =
1527 cpu_to_le32(np->rx_info[entry].mapping | RxDescValid);
1529 if (entry == RX_RING_SIZE - 1)
1530 np->rx_ring[entry].rxaddr |= cpu_to_le32(RxDescEndRing);
1531 /* We could defer this until later... */
1532 writew(entry, dev->base_addr + RxDescQIdx);
1536 printk(KERN_DEBUG " exiting netdev_rx() status of %d was %8.8x.\n",
1537 np->rx_done, desc_status);
1539 /* Restart Rx engine if stopped. */
/*
 * netdev_media_change: handle a link-change interrupt.  Re-reads the PHY
 * status, derives the negotiated (or forced) speed/duplex, updates the
 * carrier state, and reprograms the chip's TxMode duplex bit if needed.
 */
1544 static void netdev_media_change(struct net_device *dev)
1546 struct netdev_private *np = dev->priv;
1547 long ioaddr = dev->base_addr;
1548 u16 reg0, reg1, reg4, reg5;
1551 /* reset status first */
/* BMSR latches link-down events; read twice so the second read
 * reflects the current state. */
1552 mdio_read(dev, np->phys[0], MII_BMCR);
1553 mdio_read(dev, np->phys[0], MII_BMSR);
1555 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1556 reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1558 if (reg1 & BMSR_LSTATUS) {
1560 if (reg0 & BMCR_ANENABLE) {
1561 /* autonegotiation is enabled */
/* Intersect our advertisement with the link partner's ability,
 * highest common denominator first (100FD > 100HD > 10FD > 10HD). */
1562 reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1563 reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1564 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1566 np->mii_if.full_duplex = 1;
1567 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1569 np->mii_if.full_duplex = 0;
1570 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1572 np->mii_if.full_duplex = 1;
1575 np->mii_if.full_duplex = 0;
1578 /* autonegotiation is disabled */
/* Forced mode: take speed/duplex straight from BMCR. */
1579 if (reg0 & BMCR_SPEED100)
1583 if (reg0 & BMCR_FULLDPLX)
1584 np->mii_if.full_duplex = 1;
1586 np->mii_if.full_duplex = 0;
1588 netif_carrier_on(dev);
1589 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1591 np->speed100 ? "100" : "10",
1592 np->mii_if.full_duplex ? "full" : "half");
/* Sync the chip's Tx duplex bit (bit 1 of TxMode) with the PHY.
 * The 0x8000 write appears to latch the new mode before the plain
 * value is written -- NOTE(review): confirm against the datasheet. */
1594 new_tx_mode = np->tx_mode & ~0x2; /* duplex setting */
1595 if (np->mii_if.full_duplex)
1597 if (np->tx_mode != new_tx_mode) {
1598 np->tx_mode = new_tx_mode;
1599 writel(np->tx_mode | 0x8000, ioaddr + TxMode);
1601 writel(np->tx_mode, ioaddr + TxMode);
1604 netif_carrier_off(dev);
1605 printk(KERN_DEBUG "%s: Link is down\n", dev->name);
/*
 * netdev_error: handle the "abnormal" interrupt sources: raise the Tx
 * FIFO threshold on near-underrun, account GFP/checksum/DMA errors, and
 * log any source not covered by a known mask bit.
 */
1610 static void netdev_error(struct net_device *dev, int intr_status)
1612 struct netdev_private *np = dev->priv;
1614 /* Came close to underrunning the Tx FIFO, increase threshold. */
/* Threshold register is in units of 16 bytes (see the printk below).
 * NOTE(review): no visible upper bound on ++np->tx_threshold. */
1615 if (intr_status & IntrTxDataLow) {
1616 writel(++np->tx_threshold, dev->base_addr + TxThreshold);
1617 printk(KERN_NOTICE "%s: Increasing Tx FIFO threshold to %d bytes\n",
1618 dev->name, np->tx_threshold * 16);
/* Rx frame processor died: count as FIFO + generic Rx error. */
1620 if (intr_status & IntrRxGFPDead) {
1621 np->stats.rx_fifo_errors++;
1622 np->stats.rx_errors++;
1624 if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1625 np->stats.tx_fifo_errors++;
1626 np->stats.tx_errors++;
/* Anything left over is unexpected -- log it when debugging. */
1628 if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1629 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1630 dev->name, intr_status);
/*
 * get_stats: refresh np->stats from the chip's on-board statistics block
 * (register window at 0x57000).  Most counters are read-and-replace;
 * rx_dropped accumulates and its hardware counter is cleared after read.
 */
1634 static struct net_device_stats *get_stats(struct net_device *dev)
1636 long ioaddr = dev->base_addr;
1637 struct netdev_private *np = dev->priv;
1639 /* This adapter architecture needs no SMP locks. */
1640 np->stats.tx_bytes = readl(ioaddr + 0x57010);
1641 np->stats.rx_bytes = readl(ioaddr + 0x57044);
1642 np->stats.tx_packets = readl(ioaddr + 0x57000);
1643 np->stats.tx_aborted_errors =
1644 readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1645 np->stats.tx_window_errors = readl(ioaddr + 0x57018);
1646 np->stats.collisions =
1647 readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1649 /* The chip only need report frame silently dropped. */
/* rx_dropped is cumulative in software; clear the hardware counter. */
1650 np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1651 writew(0, ioaddr + RxDMAStatus);
1652 np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1653 np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1654 np->stats.rx_length_errors = readl(ioaddr + 0x57058);
1655 np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
1661 /* Chips may use the upper or lower CRC bits, and may reverse and/or invert
1662 them. Select the endian-ness that results in minimal calculations.
/*
 * set_rx_mode: program the receive filter.  Four regimes, in order:
 * promiscuous (accept all); too many multicasts / IFF_ALLMULTI (accept
 * all multicast); <= 15 addresses (16-entry perfect filter, entry 0 is
 * reserved for the station address); otherwise a 512-bit multicast hash
 * table keyed by the top bits of the little-endian CRC.
 */
1665 static void set_rx_mode(struct net_device *dev)
1667 long ioaddr = dev->base_addr;
1669 struct dev_mc_list *mclist;
1672 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1673 rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptAll|AcceptMyPhys;
1674 } else if ((dev->mc_count > multicast_filter_limit)
1675 || (dev->flags & IFF_ALLMULTI)) {
1676 /* Too many to match, or accept all multicasts. */
1677 rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptMyPhys;
1678 } else if (dev->mc_count <= 15) {
1679 /* Use the 16 element perfect filter, skip first entry. */
1680 long filter_addr = ioaddr + PerfFilterTable + 1 * 16;
/* Each filter entry is three 16-bit words at 4-byte strides;
 * addresses are written high-word first. */
1681 for (i = 1, mclist = dev->mc_list; mclist && i <= dev->mc_count;
1682 i++, mclist = mclist->next) {
1683 u16 *eaddrs = (u16 *)mclist->dmi_addr;
1684 writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
1685 writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
1686 writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8;
/* Fill unused perfect-filter slots with a no-match pattern. */
1689 writew(0xffff, filter_addr); filter_addr += 4;
1690 writew(0xffff, filter_addr); filter_addr += 4;
1691 writew(0xffff, filter_addr); filter_addr += 8;
1693 rx_mode = AcceptBroadcast | AcceptMyPhys;
1695 /* Must use a multicast hash table. */
1697 u16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
1699 memset(mc_filter, 0, sizeof(mc_filter));
/* Hash = top 9 bits of the little-endian CRC of the address. */
1700 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1701 i++, mclist = mclist->next) {
1702 int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
1703 __u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1];
1705 *fptr |= cpu_to_le32(1 << (bit_nr & 31));
1707 /* Clear the perfect filter list, skip first entry. */
1708 filter_addr = ioaddr + PerfFilterTable + 1 * 16;
1709 for (i = 1; i < 16; i++) {
1710 writew(0xffff, filter_addr); filter_addr += 4;
1711 writew(0xffff, filter_addr); filter_addr += 4;
1712 writew(0xffff, filter_addr); filter_addr += 8;
/* Hash table: 32 16-bit words at 16-byte register strides. */
1714 for (filter_addr = ioaddr + HashTable, i=0; i < 32; filter_addr+= 16, i++)
1715 writew(mc_filter[i], filter_addr);
1716 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1718 writel(rx_mode, ioaddr + RxFilterMode);
/*
 * netdev_ethtool_ioctl: service SIOCETHTOOL sub-commands.  Copies the
 * command word from user space, dispatches on ecmd.cmd, and copies
 * results back.  Link settings are delegated to the generic MII helpers
 * under np->lock.
 */
1722 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
1724 struct ethtool_cmd ecmd;
1725 struct netdev_private *np = dev->priv;
1727 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
/* Driver identification. */
1731 case ETHTOOL_GDRVINFO: {
1732 struct ethtool_drvinfo info;
1733 memset(&info, 0, sizeof(info));
1734 info.cmd = ecmd.cmd;
1735 strcpy(info.driver, DRV_NAME);
1736 strcpy(info.version, DRV_VERSION);
1737 *info.fw_version = 0;
1738 strcpy(info.bus_info, PCI_SLOT_NAME(np->pci_dev));
1739 if (copy_to_user(useraddr, &info, sizeof(info)))
/* Get link settings through the generic MII layer. */
1745 case ETHTOOL_GSET: {
1746 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
1747 spin_lock_irq(&np->lock);
1748 mii_ethtool_gset(&np->mii_if, &ecmd);
1749 spin_unlock_irq(&np->lock);
1750 if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
/* Set link settings through the generic MII layer. */
1755 case ETHTOOL_SSET: {
1757 struct ethtool_cmd ecmd;
1758 if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
1760 spin_lock_irq(&np->lock);
1761 r = mii_ethtool_sset(&np->mii_if, &ecmd);
1762 spin_unlock_irq(&np->lock);
1765 /* restart autonegotiation */
1766 case ETHTOOL_NWAY_RST: {
1767 return mii_nway_restart(&np->mii_if);
1769 /* get link status */
1770 case ETHTOOL_GLINK: {
1771 struct ethtool_value edata = {ETHTOOL_GLINK};
1772 edata.data = mii_link_ok(&np->mii_if);
1773 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1778 /* get message-level */
1779 case ETHTOOL_GMSGLVL: {
1780 struct ethtool_value edata = {ETHTOOL_GMSGLVL};
1782 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1786 /* set message-level */
1787 case ETHTOOL_SMSGLVL: {
1788 struct ethtool_value edata;
1789 if (copy_from_user(&edata, useraddr, sizeof(edata)))
/*
 * netdev_ioctl: top-level ioctl dispatcher.  SIOCETHTOOL goes to
 * netdev_ethtool_ioctl(); MII register ioctls go to generic_mii_ioctl()
 * under np->lock.  A write to our own PHY triggers a duplex re-check.
 */
1800 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1802 struct netdev_private *np = dev->priv;
1803 struct mii_ioctl_data *data = (struct mii_ioctl_data *) & rq->ifr_data;
/* Interface must be up for any of these operations. */
1806 if (!netif_running(dev))
1809 if (cmd == SIOCETHTOOL)
1810 rc = netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
1813 spin_lock_irq(&np->lock);
1814 rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
1815 spin_unlock_irq(&np->lock);
/* The MII write may have changed speed/duplex -- resynchronize. */
1817 if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
/*
 * netdev_close: bring the interface down.  Stops the queue, masks chip
 * interrupts, (optionally) dumps ring state on i386 debug builds, frees
 * the IRQ, then unmaps and frees every skb still held by the Rx and Tx
 * rings so no DMA mappings or buffers leak.
 */
1824 static int netdev_close(struct net_device *dev)
1826 long ioaddr = dev->base_addr;
1827 struct netdev_private *np = dev->priv;
1830 netif_stop_queue(dev);
1834 printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %4.4x.\n",
1835 dev->name, (int)readl(ioaddr + IntrStatus));
1836 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1837 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1840 /* Disable interrupts by clearing the interrupt mask. */
1841 writel(0, ioaddr + IntrEnable);
1843 /* Stop the chip's Tx and Rx processes. */
/* Debug-only ring dump (first 8 entries of each ring). */
1847 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
1849 for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
1850 printk(KERN_DEBUG " #%d desc. %8.8x %8.8x -> %8.8x.\n",
1851 i, le32_to_cpu(np->tx_ring[i].status),
1852 le32_to_cpu(np->tx_ring[i].first_addr),
1853 le32_to_cpu(np->tx_done_q[i].status));
1854 printk(KERN_DEBUG " Rx ring at %8.8x -> %p:\n",
1855 np->rx_ring_dma, np->rx_done_q);
1857 for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
1858 printk(KERN_DEBUG " #%d desc. %8.8x -> %8.8x\n",
1859 i, le32_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
1862 #endif /* __i386__ debugging only */
1864 free_irq(dev->irq, dev);
1866 /* Free all the skbuffs in the Rx queue. */
1867 for (i = 0; i < RX_RING_SIZE; i++) {
1868 np->rx_ring[i].rxaddr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1869 if (np->rx_info[i].skb != NULL) {
1870 pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1871 dev_kfree_skb(np->rx_info[i].skb);
1873 np->rx_info[i].skb = NULL;
1874 np->rx_info[i].mapping = 0;
/* Release any Tx skbs the completion queue never reclaimed. */
1876 for (i = 0; i < TX_RING_SIZE; i++) {
1877 struct sk_buff *skb = np->tx_info[i].skb;
1880 #endif /* ZEROCOPY */
1883 pci_unmap_single(np->pci_dev,
1884 np->tx_info[i].first_mapping,
1885 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
1886 np->tx_info[i].first_mapping = 0;
1888 np->tx_info[i].skb = NULL;
1890 for (j = 0; j < MAX_STARFIRE_FRAGS; j++)
1891 if (np->tx_info[i].frag_mapping[j]) {
1892 pci_unmap_single(np->pci_dev,
1893 np->tx_info[i].frag_mapping[j],
1894 skb_shinfo(skb)->frags[j].size,
1896 np->tx_info[i].frag_mapping[j] = 0;
1899 #endif /* ZEROCOPY */
/* 2.2.x module-use-count compatibility shim. */
1902 COMPAT_MOD_DEC_USE_COUNT;
/*
 * starfire_remove_one: PCI hot-unplug / driver-unload teardown.  Frees
 * the four DMA-coherent queue/ring allocations, unregisters the netdev,
 * unmaps the MMIO window, releases the PCI regions, and finally frees
 * the net_device (whose allocation also holds np).
 */
1908 static void __devexit starfire_remove_one (struct pci_dev *pdev)
1910 struct net_device *dev = pci_get_drvdata(pdev);
1911 struct netdev_private *np;
/* Release the DMA-coherent completion queues and descriptor rings. */
1918 pci_free_consistent(pdev, PAGE_SIZE,
1919 np->tx_done_q, np->tx_done_q_dma);
1921 pci_free_consistent(pdev,
1922 sizeof(struct rx_done_desc) * DONE_Q_SIZE,
1923 np->rx_done_q, np->rx_done_q_dma);
1925 pci_free_consistent(pdev, PAGE_SIZE,
1926 np->tx_ring, np->tx_ring_dma);
1928 pci_free_consistent(pdev, PAGE_SIZE,
1929 np->rx_ring, np->rx_ring_dma);
1931 unregister_netdev(dev);
1932 iounmap((char *)dev->base_addr);
1933 pci_release_regions(pdev);
1935 pci_set_drvdata(pdev, NULL);
1936 kfree(dev); /* Will also free np!! */
/* PCI driver glue: probe/remove entry points and the device ID table. */
1940 static struct pci_driver starfire_driver = {
1942 .probe = starfire_init_one,
1943 .remove = __devexit_p(starfire_remove_one),
1944 .id_table = starfire_pci_tbl,
/* Module init: register the PCI driver (probes all matching devices). */
1948 static int __init starfire_init (void)
1950 /* when a module, this is printed whether or not devices are found in probe */
1954 return pci_module_init (&starfire_driver);
/* Module exit: unregister the PCI driver (invokes remove for each device). */
1958 static void __exit starfire_cleanup (void)
1960 pci_unregister_driver (&starfire_driver);
1964 module_init(starfire_init);
1965 module_exit(starfire_cleanup);
1970 * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O2 -c starfire.c"
1971 * simple-compile-command: "gcc -DMODULE -O2 -c starfire.c"