2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
8 * Copyright (C) 1999, 2000, 2001 Ralf Baechle
9 * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
12 * o IOC3 ASIC specification 4.51, 1996-04-18
13 * o IEEE 802.3 specification, 2000 edition
14 * o DP38840A Specification, National Semiconductor, March 1997
18 * o Handle allocation failures in ioc3_alloc_skb() more gracefully.
19 * o Handle allocation failures in ioc3_init_rings().
20 * o Use prefetching for large packets. What is a good lower limit for
22 * o We're probably allocating a bit too much memory.
23 * o Use hardware checksums.
24 * o Convert to using a IOC3 meta driver.
25 * o Which PHYs might possibly be attached to the IOC3 in real life,
26 * which workarounds are required for them? Do we ever have Lucent's?
27 * o For the 2.5 branch kill the mii-tool ioctls.
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/delay.h>
32 #include <linux/kernel.h>
34 #include <linux/errno.h>
35 #include <linux/module.h>
36 #include <linux/pci.h>
37 #include <linux/crc32.h>
40 #include <linux/serial.h>
41 #include <asm/serial.h>
42 #define IOC3_BAUD (22000000 / (3*16))
43 #define IOC3_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/ethtool.h>
49 #include <linux/skbuff.h>
50 #include <linux/dp83840.h>
52 #include <asm/byteorder.h>
54 #include <asm/pgtable.h>
55 #include <asm/uaccess.h>
56 #include <asm/sn/types.h>
57 #include <asm/sn/sn0/addrs.h>
58 #include <asm/sn/sn0/hubni.h>
59 #include <asm/sn/sn0/hubio.h>
60 #include <asm/sn/klconfig.h>
61 #include <asm/sn/ioc3.h>
62 #include <asm/sn/sn0/ip27.h>
63 #include <asm/pci/bridge.h>
66 * 64 RX buffers. This is tunable in the range of 16 <= x < 512. The
67 * value must be a power of two.
71 /* Timer state engine. */
/* States for the link auto-negotiation timer (see ioc3_timer()).
 * NOTE: the closing brace of this enum is outside this excerpt. */
72 enum ioc3_timer_state {
73 arbwait = 0, /* Waiting for auto negotiation to complete. */
74 lupwait = 1, /* Auto-neg complete, awaiting link-up status. */
75 ltrywait = 2, /* Forcing try of all modes, from fastest to slowest. */
76 asleep = 3, /* Time inactive. */
79 /* Private per NIC data of the driver. */
/* Fields of struct ioc3_private; the struct's opening line (and a `regs`
 * pointer to the memory-mapped struct ioc3, used throughout below) is
 * outside this excerpt. */
83 unsigned long *rxr; /* pointer to receiver ring */
84 struct ioc3_etxd *txr; /* TX descriptor ring (4 pages, 128 entries) */
85 struct sk_buff *rx_skbs[512]; /* skbs backing the 512 RX ring slots */
86 struct sk_buff *tx_skbs[128]; /* skbs pending completion per TX slot */
87 struct net_device_stats stats;
88 int rx_ci; /* RX consumer index */
89 int rx_pi; /* RX producer index */
90 int tx_ci; /* TX consumer index */
91 int tx_pi; /* TX producer index */
93 u32 emcr, ehar_h, ehar_l; /* sw copies of MAC control / hash regs */
95 struct net_device *dev;
97 /* Members used by autonegotiation */
98 struct timer_list ioc3_timer;
99 enum ioc3_timer_state timer_state; /* State of auto-neg timer. */
100 unsigned int timer_ticks; /* Number of clicks at each state */
101 unsigned short sw_bmcr; /* sw copy of MII config register */
102 unsigned short sw_bmsr; /* sw copy of MII status register */
103 unsigned short sw_physid1; /* sw copy of PHYSID1 */
104 unsigned short sw_physid2; /* sw copy of PHYSID2 */
105 unsigned short sw_advertise; /* sw copy of ADVERTISE */
106 unsigned short sw_lpa; /* sw copy of LPA */
107 unsigned short sw_csconfig; /* sw copy of CSCONFIG */
/* Forward declarations for routines defined later in this file. */
110 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
111 static void ioc3_set_multicast_list(struct net_device *dev);
112 static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
113 static void ioc3_timeout(struct net_device *dev);
114 static inline unsigned int ioc3_hash(const unsigned char *addr);
115 static inline void ioc3_stop(struct ioc3_private *ip);
116 static void ioc3_init(struct ioc3_private *ip);
/* Name reported to request_irq() and in probe messages. */
118 static const char ioc3_str[] = "IOC3 Ethernet";
120 /* We use this to acquire receive skb's that we can DMA directly into. */
/* Distance from addr up to the next 128-byte boundary. */
121 #define ALIGNED_RX_SKB_ADDR(addr) \
122 ((((unsigned long)(addr) + (128 - 1)) & ~(128 - 1)) - (unsigned long)(addr))
/* Allocate an skb whose data area is 128-byte aligned for RX DMA.
 * NOTE: parts of this statement-expression macro are outside this excerpt. */
124 #define ioc3_alloc_skb(__length, __gfp_flags) \
125 ({ struct sk_buff *__skb; \
126 __skb = alloc_skb((__length) + 128, (__gfp_flags)); \
128 int __offset = ALIGNED_RX_SKB_ADDR(__skb->data); \
130 skb_reserve(__skb, __offset); \
135 /* BEWARE: The IOC3 documentation documents the size of rx buffers as
136 1644 while it's actually 1664. This one was nasty to track down ... */
/* 1664 data bytes + software RX_OFFSET + 128 bytes of alignment slack. */
138 #define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + 128)
140 /* DMA barrier to separate cached and uncached accesses. */
142 __asm__("sync" ::: "memory")
145 #define IOC3_SIZE 0x100000
/* MMIO register accessors; bodies are partly outside this excerpt. */
147 #define ioc3_r(reg) \
154 #define ioc3_w(reg,val) \
156 (ioc3->reg = (val)); \
/* Pack one-wire pulse and sample durations into their MCR bit fields. */
160 mcr_pack(u32 pulse, u32 sample)
162 return (pulse << 10) | (sample << 2);
/* Busy-wait until the MCR indicates the one-wire operation finished
 * (bit 1 set); the mcr read and return value are outside this excerpt. */
166 nic_wait(struct ioc3 *ioc3)
172 } while (!(mcr & 2));
/* Issue a one-wire bus reset pulse and sample device presence. */
178 nic_reset(struct ioc3 *ioc3)
182 ioc3_w(mcr, mcr_pack(500, 65));
183 presence = nic_wait(ioc3);
185 ioc3_w(mcr, mcr_pack(0, 500));
/* Read one bit from the one-wire bus: short pulse, then sample. */
192 nic_read_bit(struct ioc3 *ioc3)
196 ioc3_w(mcr, mcr_pack(6, 13));
197 result = nic_wait(ioc3);
198 ioc3_w(mcr, mcr_pack(0, 100));
/* Write one bit on the one-wire bus; pulse width encodes the bit value. */
205 nic_write_bit(struct ioc3 *ioc3, int bit)
208 ioc3_w(mcr, mcr_pack(6, 110));
210 ioc3_w(mcr, mcr_pack(80, 30));
216 * Read a byte from an iButton device
219 nic_read_byte(struct ioc3 *ioc3)
/* One-wire transfers are LSB first, hence shifting in at bit 7. */
224 for (i = 0; i < 8; i++)
225 result = (result >> 1) | (nic_read_bit(ioc3) << 7);
231 * Write a byte to an iButton device
234 nic_write_byte(struct ioc3 *ioc3, int byte)
/* Shift the byte out LSB first, one bit per bus transaction. */
238 for (i = 8; i; i--) {
242 nic_write_bit(ioc3, bit);
/* Walk the one-wire bus with the ROM Search command (0xf0) and return the
 * 64-bit ROM address of the next device; *last tracks the search state
 * between calls. Fragment -- several branches are outside this excerpt. */
247 nic_find(struct ioc3 *ioc3, int *last)
249 int a, b, index, disc;
254 nic_write_byte(ioc3, 0xf0);
256 /* Algorithm from ``Book of iButton Standards''. */
257 for (index = 0, disc = 0; index < 64; index++) {
/* Read the bit and its complement; a == b == 1 means no device responded. */
258 a = nic_read_bit(ioc3);
259 b = nic_read_bit(ioc3);
262 printk("NIC search failed (not fatal).\n");
268 if (index == *last) {
269 address |= 1UL << index;
270 } else if (index > *last) {
271 address &= ~(1UL << index);
273 } else if ((address & (1UL << index)) == 0)
/* Steer the search down the chosen branch of the address tree. */
275 nic_write_bit(ioc3, address & (1UL << index));
279 address |= 1UL << index;
281 address &= ~(1UL << index);
282 nic_write_bit(ioc3, a);
/* Identify the NIC one-wire part, select it via Match ROM (0x55), and
 * extract its 6-byte serial number. Fragment -- the type-assignment
 * switch arms and crc handling are outside this excerpt. */
292 static int nic_init(struct ioc3 *ioc3)
303 reg = nic_find(ioc3, &save);
305 switch (reg & 0xff) {
311 /* Let the caller try again. */
/* Match ROM: echo the full 64-bit address back to address the device. */
320 nic_write_byte(ioc3, 0x55);
321 for (i = 0; i < 8; i++)
322 nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);
324 reg >>= 8; /* Shift out type. */
325 for (i = 0; i < 6; i++) {
326 serial[i] = reg & 0xff;
333 printk("Found %s NIC", type);
/* NOTE(review): this compares pointers, not contents -- it only works if
 * `type` was assigned the identical "unknown" literal in this translation
 * unit (literal pooling). strcmp() would be the robust test; confirm
 * against the missing switch arms before changing. */
334 if (type != "unknown") {
335 printk (" registration number %02x:%02x:%02x:%02x:%02x:%02x,"
336 " CRC %02x", serial[0], serial[1], serial[2],
337 serial[3], serial[4], serial[5], crc);
345 * Read the NIC (Number-In-a-Can) device used to store the MAC address on
346 * SN0 / SN00 nodeboards and PCI cards.
348 static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
350 struct ioc3 *ioc3 = ip->regs;
352 int tries = 2; /* There may be some problem with the battery? */
/* Drive GPIO pin 21 to route the one-wire bus to the NIC part. */
355 ioc3_w(gpcr_s, (1 << 21));
364 printk("Failed to read MAC address\n");
/* Read ROM (0xf0) at offset 0x0000, then pull in the 14-byte record. */
369 nic_write_byte(ioc3, 0xf0);
370 nic_write_byte(ioc3, 0x00);
371 nic_write_byte(ioc3, 0x00);
373 for (i = 13; i >= 0; i--)
374 nic[i] = nic_read_byte(ioc3);
/* Bytes 2..7 of the record hold the station address. */
376 for (i = 2; i < 8; i++)
377 ip->dev->dev_addr[i - 2] = nic[i];
380 #if defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_SGI_SN2)
382 * Get the ether-address on SN1 nodes
384 static void ioc3_get_eaddr_sn(struct ioc3_private *ip)
386 int ibrick_mac_addr_get(nasid_t, char *);
387 struct ioc3 *ioc3 = ip->regs;
388 nasid_t nasid_of_ioc3;
394 * err_val = ibrick_mac_addr_get(get_nasid(), io7eaddr );
396 * BAD!! The above call uses get_nasid() and assumes that
397 * the ioc3 pointed to by struct ioc3 is hooked up to the
398 * cbrick that we're running on. The proper way to make this call
399 * is to figure out which nasid the ioc3 is connected to
400 * and use that to call ibrick_mac_addr_get. Below is
401 * a hack to do just that.
405 * Get the nasid of the ioc3 from the ioc3's base addr.
406 * FIXME: the 8 at the end assumes we're in memory mode,
407 * not node mode (for that, we'd change it to a 9).
408 * Is there a call to extract this info from a physical
409 * addr somewhere in an sn header file already? If so,
410 * we should probably use that, or restructure this routine
411 * to use pci_dev and generic numa nodeid getting stuff.
/* Extract the nasid bits from the ioc3's physical base address. */
413 nasid_of_ioc3 = (((unsigned long)ioc3 >> 33) & ~(-1 << 8));
414 err_val = ibrick_mac_addr_get(nasid_of_ioc3, io7eaddr );
418 /* Couldn't read the eeprom; try OSLoadOptions. */
419 printk("WARNING: ibrick_mac_addr_get failed: %d\n", err_val);
421 /* this is where we hardwire the mac address
422 * 1st ibrick had 08:00:69:11:34:75
423 * 2nd ibrick had 08:00:69:11:35:35
426 * mankato1 08:00:69:11:BE:95
427 * warroad 08:00:69:11:bd:60
428 * duron 08:00:69:11:34:60
430 * an easy way to get the mac address is to hook
431 * up an ip35, then from L1 do 'cti serial'
432 * and then look for MAC line XXX THIS DOESN'T QUITE WORK!!
434 printk("ioc3_get_eaddr: setting ethernet address to:\n -----> ");
435 ip->dev->dev_addr[0] = 0x8;
436 ip->dev->dev_addr[1] = 0x0;
437 ip->dev->dev_addr[2] = 0x69;
438 ip->dev->dev_addr[3] = 0x11;
439 ip->dev->dev_addr[4] = 0x34;
440 ip->dev->dev_addr[5] = 0x60;
443 long simple_strtol(const char *,char **,unsigned int);
/* Parse the 12-hex-digit address string into the 6 dev_addr bytes. */
445 mac = simple_strtol(io7eaddr, (char **)0, 16);
446 ip->dev->dev_addr[0] = (mac >> 40) & 0xff;
447 ip->dev->dev_addr[1] = (mac >> 32) & 0xff;
448 ip->dev->dev_addr[2] = (mac >> 24) & 0xff;
449 ip->dev->dev_addr[3] = (mac >> 16) & 0xff;
450 ip->dev->dev_addr[4] = (mac >> 8) & 0xff;
451 ip->dev->dev_addr[5] = mac & 0xff;
457 * Ok, this is hosed by design. It's necessary to know what machine the
458 * NIC is in in order to know how to read the NIC address. We also have
459 * to know if it's a PCI card or a NIC in on the node board ...
461 static void ioc3_get_eaddr(struct ioc3_private *ip)
463 void (*do_get_eaddr)(struct ioc3_private *ip) = NULL;
467 * We should also use this code for PCI cards, no matter what host
468 * machine but how to know that we're a PCI card?
/* Pick the platform-specific MAC-reading routine at compile time. */
470 #ifdef CONFIG_SGI_IP27
471 do_get_eaddr = ioc3_get_eaddr_nic;
473 #if defined(CONFIG_IA64_SGI_SN1) || defined(CONFIG_IA64_SGI_SN2)
474 do_get_eaddr = ioc3_get_eaddr_sn;
478 printk(KERN_ERR "Don't know how to read MAC address of this "
483 printk("Ethernet address is ");
484 for (i = 0; i < 6; i++) {
485 printk("%02x", ip->dev->dev_addr[i]);
494 * Caller must hold the ioc3_lock ever for MII readers. This is also
495 * used to protect the transmitter side but it's low contention.
497 static u16 mii_read(struct ioc3_private *ip, int reg)
499 struct ioc3 *ioc3 = ip->regs;
/* NOTE(review): unbounded busy-waits -- hangs if the PHY never clears
 * MICR_BUSY. */
502 while (ioc3->micr & MICR_BUSY);
503 ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG;
504 while (ioc3->micr & MICR_BUSY);
506 return ioc3->midr_r & MIDR_DATA_MASK;
/* Write an MII register; same locking and busy-wait caveats as mii_read(). */
509 static void mii_write(struct ioc3_private *ip, int reg, u16 data)
511 struct ioc3 *ioc3 = ip->regs;
514 while (ioc3->micr & MICR_BUSY);
516 ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg;
517 while (ioc3->micr & MICR_BUSY);
520 static int ioc3_mii_init(struct ioc3_private *ip);
/* net_device get_stats hook: fold the hardware collision counter into the
 * software statistics before returning them. */
522 static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
524 struct ioc3_private *ip = dev->priv;
525 struct ioc3 *ioc3 = ip->regs;
527 ip->stats.collisions += (ioc3->etcdc & ETCDC_COLLCNT_MASK);
/* Receive path: drain completed descriptors from the 512-entry RX ring,
 * pass good frames up the stack, refill each consumed slot with a fresh
 * skb and re-arm the producer register. Runs from the interrupt handler.
 * (Fragmented excerpt -- some declarations/branches are outside view.) */
532 ioc3_rx(struct ioc3_private *ip)
534 struct sk_buff *skb, *new_skb;
535 struct ioc3 *ioc3 = ip->regs;
536 int rx_entry, n_entry, len;
537 struct ioc3_erxbuf *rxb;
541 rxr = (unsigned long *) ip->rxr; /* Ring base */
542 rx_entry = ip->rx_ci; /* RX consume index */
545 skb = ip->rx_skbs[rx_entry];
546 rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
547 w0 = be32_to_cpu(rxb->w0);
549 while (w0 & ERXBUF_V) {
550 err = be32_to_cpu(rxb->err); /* It's valid ... */
551 if (err & ERXBUF_GOODPKT) {
552 len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
554 skb->protocol = eth_type_trans(skb, ip->dev);
556 new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
558 /* Ouch, drop packet and just recycle packet
559 to keep the ring filled. */
560 ip->stats.rx_dropped++;
566 ip->rx_skbs[rx_entry] = NULL; /* Poison */
568 new_skb->dev = ip->dev;
570 /* Because we reserve afterwards. */
571 skb_put(new_skb, (1664 + RX_OFFSET));
572 rxb = (struct ioc3_erxbuf *) new_skb->data;
573 skb_reserve(new_skb, RX_OFFSET);
575 ip->dev->last_rx = jiffies;
576 ip->stats.rx_packets++; /* Statistics */
577 ip->stats.rx_bytes += len;
579 /* The frame is invalid and the skb never
580 reached the network layer so we can just
583 ip->stats.rx_errors++;
585 if (err & ERXBUF_CRCERR) /* Statistics */
586 ip->stats.rx_crc_errors++;
587 if (err & ERXBUF_FRAMERR)
588 ip->stats.rx_frame_errors++;
590 ip->rx_skbs[n_entry] = new_skb;
/* The ring entry is a 64-bit (0xa5 << 56 | physaddr) cookie; swap it with
 * cpu_to_be64() exactly as ioc3_alloc_rings() does.  cpu_to_be32() here
 * would silently truncate the upper 32 bits of the buffer address. */
591 rxr[n_entry] = cpu_to_be64((0xa5UL << 56) |
592 ((unsigned long) rxb & TO_PHYS_MASK));
593 rxb->w0 = 0; /* Clear valid flag */
594 n_entry = (n_entry + 1) & 511; /* Update erpir */
596 /* Now go on to the next ring entry. */
597 rx_entry = (rx_entry + 1) & 511;
598 skb = ip->rx_skbs[rx_entry];
599 rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
600 w0 = be32_to_cpu(rxb->w0);
/* Tell the chip how far we refilled and re-arm the RX timer interrupt. */
602 ioc3->erpir = (n_entry << 3) | ERPIR_ARM;
604 ip->rx_ci = rx_entry;
/* Transmit completion: reap sent skbs between the old consumer index and
 * the hardware's current one, update stats and wake the queue when the
 * backlog drops. Runs from the interrupt handler. */
608 ioc3_tx(struct ioc3_private *ip)
610 unsigned long packets, bytes;
611 struct ioc3 *ioc3 = ip->regs;
612 int tx_entry, o_entry;
616 spin_lock(&ip->ioc3_lock);
/* etcir bits 7.. hold the hardware TX consumer index (128 entries). */
619 tx_entry = (etcir >> 7) & 127;
624 while (o_entry != tx_entry) {
626 skb = ip->tx_skbs[o_entry];
628 dev_kfree_skb_irq(skb);
629 ip->tx_skbs[o_entry] = NULL;
631 o_entry = (o_entry + 1) & 127; /* Next */
633 etcir = ioc3->etcir; /* More pkts sent? */
634 tx_entry = (etcir >> 7) & 127;
637 ip->stats.tx_packets += packets;
638 ip->stats.tx_bytes += bytes;
639 ip->txqlen -= packets;
641 if (ip->txqlen < 128)
642 netif_wake_queue(ip->dev);
645 spin_unlock(&ip->ioc3_lock);
649 * Deal with fatal IOC3 errors. This condition might be caused by a hard or
650 * software problems, so we should try to recover
651 * more gracefully if this ever happens. In theory we might be flooded
652 * with such error interrupts if something really goes wrong, so we might
653 * also consider to take the interface down.
656 ioc3_error(struct ioc3_private *ip, u32 eisr)
658 struct net_device *dev = ip->dev;
659 unsigned char *iface = dev->name;
/* Report each asserted error condition, then (below) restart the chip. */
661 if (eisr & EISR_RXOFLO)
662 printk(KERN_ERR "%s: RX overflow.\n", iface);
663 if (eisr & EISR_RXBUFOFLO)
664 printk(KERN_ERR "%s: RX buffer overflow.\n", iface);
665 if (eisr & EISR_RXMEMERR)
666 printk(KERN_ERR "%s: RX PCI error.\n", iface);
667 if (eisr & EISR_RXPARERR)
668 printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface);
669 if (eisr & EISR_TXBUFUFLO)
670 printk(KERN_ERR "%s: TX buffer underflow.\n", iface);
671 if (eisr & EISR_TXMEMERR)
672 printk(KERN_ERR "%s: TX PCI error.\n", iface);
/* Pretend a fresh transmit just happened so the watchdog doesn't fire. */
678 dev->trans_start = jiffies;
679 netif_wake_queue(dev);
682 /* The interrupt handler does all of the Rx thread work and cleans up
683 after the Tx thread. */
684 static void ioc3_interrupt(int irq, void *_dev, struct pt_regs *regs)
686 struct net_device *dev = (struct net_device *)_dev;
687 struct ioc3_private *ip = dev->priv;
688 struct ioc3 *ioc3 = ip->regs;
/* Mask of all interrupt sources this driver enables in eier. */
689 const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
690 EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
691 EISR_TXEXPLICIT | EISR_TXMEMERR;
694 eisr = ioc3->eisr & enabled;
698 ioc3->eisr; /* Flush */
/* Fatal conditions first, then the normal RX/TX service paths. */
700 if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
701 EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
702 ioc3_error(ip, eisr);
703 if (eisr & EISR_RXTIMERINT)
705 if (eisr & EISR_TXEXPLICIT)
/* Re-read in case new events arrived while we were servicing. */
708 eisr = ioc3->eisr & enabled;
713 * Auto negotiation. The scheme is very simple. We have a timer routine that
714 * keeps watching the auto negotiation process as it progresses. The DP83840
715 * is first told to start doing its thing, we set up the time and place the
716 * timer state machine in its initial state.
718 * Here the timer peeks at the DP83840 status registers at each click to see
719 * if the auto negotiation has completed, we assume here that the DP83840 PHY
720 * will time out at some point and just tell us what (didn't) happen. For
721 * complete coverage we only allow so many of the ticks at this level to run,
722 * when this has expired we print a warning message and try another strategy.
723 * This "other" strategy is to force the interface into various speed/duplex
724 * configurations and we stop when we see a link-up condition before the
725 * maximum number of "peek" ticks have occurred.
727 * Once a valid link status has been detected we configure the IOC3 to speak
728 * the most efficient protocol we could get a clean link for. The priority
729 * for link configurations, highest first is:
731 * 100 Base-T Full Duplex
732 * 100 Base-T Half Duplex
733 * 10 Base-T Full Duplex
734 * 10 Base-T Half Duplex
736 * We start a new timer now, after a successful auto negotiation status has
737 * been detected. This timer just waits for the link-up bit to get set in
738 * the BMCR of the DP83840. When this occurs we print a kernel log message
739 * describing the link type in use and the fact that it is up.
741 * If a fatal error of some sort is signalled and detected in the interrupt
742 * service routine, and the chip is reset, or the link is ifconfig'd down
743 * and then back up, this entire process repeats itself all over again.
/* Step the forced-link state machine down to the next slower/simpler
 * link mode: full->half duplex first, then 100->10 Mb/s. The return
 * values (success / exhausted) are outside this excerpt. */
745 static int ioc3_try_next_permutation(struct ioc3_private *ip)
747 ip->sw_bmcr = mii_read(ip, MII_BMCR);
749 /* Downgrade from full to half duplex. Only possible via ethtool. */
750 if (ip->sw_bmcr & BMCR_FULLDPLX) {
751 ip->sw_bmcr &= ~BMCR_FULLDPLX;
752 mii_write(ip, MII_BMCR, ip->sw_bmcr);
757 /* Downgrade from 100 to 10. */
758 if (ip->sw_bmcr & BMCR_SPEED100) {
759 ip->sw_bmcr &= ~BMCR_SPEED100;
760 mii_write(ip, MII_BMCR, ip->sw_bmcr);
765 /* We've tried everything. */
/* Log the negotiated speed/duplex, derived from the link partner
 * ability (LPA) register. */
770 ioc3_display_link_mode(struct ioc3_private *ip)
774 ip->sw_lpa = mii_read(ip, MII_LPA);
776 if (ip->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
777 if (ip->sw_lpa & LPA_100FULL)
778 tmode = "100Mb/s, Full Duplex";
780 tmode = "100Mb/s, Half Duplex";
782 if (ip->sw_lpa & LPA_10FULL)
783 tmode = "10Mb/s, Full Duplex";
785 tmode = "10Mb/s, Half Duplex";
788 printk(KERN_INFO "%s: Link is up at %s.\n", ip->dev->name, tmode);
/* Log the speed/duplex we forced, read back from our own BMCR setting
 * (no negotiation happened in this path). */
792 ioc3_display_forced_link_mode(struct ioc3_private *ip)
794 char *speed = "", *duplex = "";
796 ip->sw_bmcr = mii_read(ip, MII_BMCR);
797 if (ip->sw_bmcr & BMCR_SPEED100)
801 if (ip->sw_bmcr & BMCR_FULLDPLX)
802 duplex = "Full Duplex.\n";
804 duplex = "Half Duplex.\n";
806 printk(KERN_INFO "%s: Link has been forced up at %s%s", ip->dev->name,
/* Program the IOC3 MAC duplex bit to match the link mode just reached:
 * from LPA if we auto-negotiated (arbwait), from our forced BMCR
 * otherwise. Fragment -- the full_duplex bookkeeping lines are outside
 * this excerpt. */
810 static int ioc3_set_link_modes(struct ioc3_private *ip)
812 struct ioc3 *ioc3 = ip->regs;
816 * All we care about is making sure the bigmac tx_cfg has a
817 * proper duplex setting.
819 if (ip->timer_state == arbwait) {
820 ip->sw_lpa = mii_read(ip, MII_LPA);
/* No common ability bits at all means negotiation went nowhere. */
821 if (!(ip->sw_lpa & (LPA_10HALF | LPA_10FULL |
822 LPA_100HALF | LPA_100FULL)))
824 if (ip->sw_lpa & LPA_100FULL)
826 else if (ip->sw_lpa & LPA_100HALF)
828 else if (ip->sw_lpa & LPA_10FULL)
833 /* Forcing a link mode. */
834 ip->sw_bmcr = mii_read(ip, MII_BMCR);
835 if (ip->sw_bmcr & BMCR_FULLDPLX)
842 ip->emcr |= EMCR_DUPLEX;
844 ip->emcr &= ~EMCR_DUPLEX;
846 ioc3->emcr = ip->emcr;
/* Detect a Lucent PHY by its OUI in PHYSID1/PHYSID2; Lucent parts must
 * not have their transceiver toggled via CSCONFIG (see ioc3_timer()). */
856 static int is_lucent_phy(struct ioc3_private *ip)
858 unsigned short mr2, mr3;
861 mr2 = mii_read(ip, MII_PHYSID1);
862 mr3 = mii_read(ip, MII_PHYSID2);
863 if ((mr2 & 0xffff) == 0x0180 && ((mr3 & 0xffff) >> 10) == 0x1d) {
/* Link state machine, run every ~1.2s. Walks the states declared in
 * enum ioc3_timer_state: wait for autoneg (arbwait), wait for link-up
 * (lupwait), force modes one by one (ltrywait), or idle (asleep).
 * Fragment -- several case labels and closing braces are outside this
 * excerpt. */
870 static void ioc3_timer(unsigned long data)
872 struct ioc3_private *ip = (struct ioc3_private *) data;
873 int restart_timer = 0;
876 switch (ip->timer_state) {
879 * Only allow for 5 ticks, that's 10 seconds and much too
880 * long to wait for arbitration to complete.
882 if (ip->timer_ticks >= 10) {
883 /* Enter force mode. */
885 ip->sw_bmcr = mii_read(ip, MII_BMCR);
886 printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful,"
887 " trying force link mode\n", ip->dev->name);
888 ip->sw_bmcr = BMCR_SPEED100;
889 mii_write(ip, MII_BMCR, ip->sw_bmcr);
891 if (!is_lucent_phy(ip)) {
893 * OK, seems we need to disable the transceiver
894 * for the first tick to make sure we get an
895 * accurate link state at the second tick.
897 ip->sw_csconfig = mii_read(ip, MII_CSCONFIG);
898 ip->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
899 mii_write(ip, MII_CSCONFIG, ip->sw_csconfig);
901 ip->timer_state = ltrywait;
905 /* Anything interesting happen? */
906 ip->sw_bmsr = mii_read(ip, MII_BMSR);
907 if (ip->sw_bmsr & BMSR_ANEGCOMPLETE) {
910 /* Just what we've been waiting for... */
911 ret = ioc3_set_link_modes(ip);
913 /* Ooops, something bad happened, go to
916 * XXX Broken hubs which don't support
917 * XXX 802.3u auto-negotiation make this
918 * XXX happen as well.
924 * Success, at least so far, advance our state
927 ip->timer_state = lupwait;
937 * Auto negotiation was successful and we are awaiting a
938 * link up status. I have decided to let this timer run
939 * forever until some sort of error is signalled, reporting
940 * a message to the user at 10 second intervals.
942 ip->sw_bmsr = mii_read(ip, MII_BMSR);
943 if (ip->sw_bmsr & BMSR_LSTATUS) {
945 * Wheee, it's up, display the link mode in use and put
946 * the timer to sleep.
948 ioc3_display_link_mode(ip);
949 ip->timer_state = asleep;
952 if (ip->timer_ticks >= 10) {
953 printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
954 "not completely up.\n", ip->dev->name);
965 * Making the timeout here too long can make it take
966 * annoyingly long to attempt all of the link mode
967 * permutations, but then again this is essentially
968 * error recovery code for the most part.
970 ip->sw_bmsr = mii_read(ip, MII_BMSR);
971 ip->sw_csconfig = mii_read(ip, MII_CSCONFIG);
972 if (ip->timer_ticks == 1) {
973 if (!is_lucent_phy(ip)) {
975 * Re-enable transceiver, we'll re-enable the
976 * transceiver next tick, then check link state
977 * on the following tick.
979 ip->sw_csconfig |= CSCONFIG_TCVDISAB;
980 mii_write(ip, MII_CSCONFIG, ip->sw_csconfig);
985 if (ip->timer_ticks == 2) {
986 if (!is_lucent_phy(ip)) {
987 ip->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
988 mii_write(ip, MII_CSCONFIG, ip->sw_csconfig);
993 if (ip->sw_bmsr & BMSR_LSTATUS) {
994 /* Force mode selection success. */
995 ioc3_display_forced_link_mode(ip);
996 ioc3_set_link_modes(ip); /* XXX error? then what? */
997 ip->timer_state = asleep;
1000 if (ip->timer_ticks >= 4) { /* 6 seconds or so... */
1003 ret = ioc3_try_next_permutation(ip);
1006 * Aieee, tried them all, reset the
1007 * chip and try all over again.
1009 printk(KERN_NOTICE "%s: Link down, "
1016 if (!is_lucent_phy(ip)) {
1017 ip->sw_csconfig = mii_read(ip,
1019 ip->sw_csconfig |= CSCONFIG_TCVDISAB;
1020 mii_write(ip, MII_CSCONFIG,
1023 ip->timer_ticks = 0;
1033 /* Can't happen ... */
1034 printk(KERN_ERR "%s: Aieee, link timer is asleep but we got "
1035 "one anyways!\n", ip->dev->name);
1037 ip->timer_ticks = 0;
1038 ip->timer_state = asleep; /* foo on you */
1042 if (restart_timer) {
1043 ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2s */
1044 add_timer(&ip->ioc3_timer);
/* Kick off link configuration: advertise all supported modes and restart
 * auto-negotiation (or force a specific mode when requested via ethtool
 * or when autoneg refuses to start), then arm the ioc3_timer state
 * machine. Fragment -- several lines are outside this excerpt. */
1049 ioc3_start_auto_negotiation(struct ioc3_private *ip, struct ethtool_cmd *ep)
1053 /* Read all of the registers we are interested in now. */
1054 ip->sw_bmsr = mii_read(ip, MII_BMSR);
1055 ip->sw_bmcr = mii_read(ip, MII_BMCR);
1056 ip->sw_physid1 = mii_read(ip, MII_PHYSID1);
1057 ip->sw_physid2 = mii_read(ip, MII_PHYSID2);
1059 /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
1061 ip->sw_advertise = mii_read(ip, MII_ADVERTISE);
1062 if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
1063 /* Advertise everything we can support. */
1064 if (ip->sw_bmsr & BMSR_10HALF)
1065 ip->sw_advertise |= ADVERTISE_10HALF;
1067 ip->sw_advertise &= ~ADVERTISE_10HALF;
1069 if (ip->sw_bmsr & BMSR_10FULL)
1070 ip->sw_advertise |= ADVERTISE_10FULL;
1072 ip->sw_advertise &= ~ADVERTISE_10FULL;
1073 if (ip->sw_bmsr & BMSR_100HALF)
1074 ip->sw_advertise |= ADVERTISE_100HALF;
1076 ip->sw_advertise &= ~ADVERTISE_100HALF;
1077 if (ip->sw_bmsr & BMSR_100FULL)
1078 ip->sw_advertise |= ADVERTISE_100FULL;
1080 ip->sw_advertise &= ~ADVERTISE_100FULL;
1081 mii_write(ip, MII_ADVERTISE, ip->sw_advertise);
1084 * XXX Currently no IOC3 card I know off supports 100BaseT4,
1085 * XXX and this is because the DP83840 does not support it,
1086 * XXX changes XXX would need to be made to the tx/rx logic in
1087 * XXX the driver as well so I completely skip checking for it
1088 * XXX in the BMSR for now.
1091 #ifdef AUTO_SWITCH_DEBUG
1092 ASD(("%s: Advertising [ ", ip->dev->name));
1093 if (ip->sw_advertise & ADVERTISE_10HALF)
1095 if (ip->sw_advertise & ADVERTISE_10FULL)
1097 if (ip->sw_advertise & ADVERTISE_100HALF)
1099 if (ip->sw_advertise & ADVERTISE_100FULL)
1103 /* Enable Auto-Negotiation, this is usually on already... */
1104 ip->sw_bmcr |= BMCR_ANENABLE;
1105 mii_write(ip, MII_BMCR, ip->sw_bmcr);
1107 /* Restart it to make sure it is going. */
1108 ip->sw_bmcr |= BMCR_ANRESTART;
1109 mii_write(ip, MII_BMCR, ip->sw_bmcr);
1111 /* BMCR_ANRESTART self clears when the process has begun. */
1113 timeout = 64; /* More than enough. */
1115 ip->sw_bmcr = mii_read(ip, MII_BMCR);
1116 if (!(ip->sw_bmcr & BMCR_ANRESTART))
1117 break; /* got it. */
1121 printk(KERN_ERR "%s: IOC3 would not start auto "
1122 "negotiation BMCR=0x%04x\n",
1123 ip->dev->name, ip->sw_bmcr);
1124 printk(KERN_NOTICE "%s: Performing force link "
1125 "detection.\n", ip->dev->name);
1128 ip->timer_state = arbwait;
1133 * Force the link up, trying first a particular mode. Either
1134 * we are here at the request of ethtool or because the IOC3
1135 * would not start to autoneg.
1139 * Disable auto-negotiation in BMCR, enable the duplex and
1140 * speed setting, init the timer state machine, and fire it off.
1142 if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
1143 ip->sw_bmcr = BMCR_SPEED100;
1145 if (ep->speed == SPEED_100)
1146 ip->sw_bmcr = BMCR_SPEED100;
1149 if (ep->duplex == DUPLEX_FULL)
1150 ip->sw_bmcr |= BMCR_FULLDPLX;
1152 mii_write(ip, MII_BMCR, ip->sw_bmcr);
1154 if (!is_lucent_phy(ip)) {
1156 * OK, seems we need to disable the transceiver for the
1157 * first tick to make sure we get an accurate link
1158 * state at the second tick.
1160 ip->sw_csconfig = mii_read(ip, MII_CSCONFIG);
1161 ip->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
1162 mii_write(ip, MII_CSCONFIG, ip->sw_csconfig);
1164 ip->timer_state = ltrywait;
/* Arm (or re-arm) the state machine timer for its first 1.2s tick. */
1167 del_timer(&ip->ioc3_timer);
1168 ip->timer_ticks = 0;
1169 ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
1170 ip->ioc3_timer.data = (unsigned long) ip;
1171 ip->ioc3_timer.function = &ioc3_timer;
1172 add_timer(&ip->ioc3_timer);
/* Probe all 32 MII addresses for a responding PHY (register 2 = PHYSID1;
 * all-ones or all-zeros means nothing there), then start autoneg. */
1175 static int ioc3_mii_init(struct ioc3_private *ip)
1181 spin_lock_irq(&ip->ioc3_lock);
1182 for (i = 0; i < 32; i++) {
1184 word = mii_read(ip, 2);
1185 if ((word != 0xffff) && (word != 0x0000)) {
1187 break; /* Found a PHY */
1191 spin_unlock_irq(&ip->ioc3_lock);
1195 ioc3_start_auto_negotiation(ip, NULL); // XXX ethtool
1197 spin_unlock_irq(&ip->ioc3_lock);
/* Compact the RX ring after a restart: shift live entries until the
 * consumer index is 16-aligned, then reset the valid flag on every
 * outstanding buffer. */
1203 ioc3_clean_rx_ring(struct ioc3_private *ip)
1205 struct sk_buff *skb;
/* Copy entries forward until rx_ci hits a multiple of 16. */
1208 for (i = ip->rx_ci; i & 15; i++) {
1209 ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
1210 ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
1215 for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) {
1216 struct ioc3_erxbuf *rxb;
1217 skb = ip->rx_skbs[i];
1218 rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
/* Drop every pending TX skb; used when resetting or closing the device. */
1224 ioc3_clean_tx_ring(struct ioc3_private *ip)
1226 struct sk_buff *skb;
1229 for (i=0; i < 128; i++) {
1230 skb = ip->tx_skbs[i];
1232 ip->tx_skbs[i] = NULL;
1233 dev_kfree_skb_any(skb);
/* Release both descriptor rings and every skb still parked in them:
 * the 4-page TX ring, then each outstanding RX skb, then the RX ring page. */
1242 ioc3_free_rings(struct ioc3_private *ip)
1244 struct sk_buff *skb;
1245 int rx_entry, n_entry;
1248 ioc3_clean_tx_ring(ip);
1249 free_pages((unsigned long)ip->txr, 2);
1254 n_entry = ip->rx_ci;
1255 rx_entry = ip->rx_pi;
1257 while (n_entry != rx_entry) {
1258 skb = ip->rx_skbs[n_entry];
1260 dev_kfree_skb_any(skb);
1262 n_entry = (n_entry + 1) & 511;
1264 free_page((unsigned long)ip->rxr);
/* Allocate the RX ring (one page, 512 entries) seeded with RX_BUFFS skbs,
 * and the TX ring (4 pages, 128 entries), if not already allocated.
 * Fragment -- error-path lines are outside this excerpt. */
1270 ioc3_alloc_rings(struct net_device *dev, struct ioc3_private *ip,
1273 struct ioc3_erxbuf *rxb;
1277 if (ip->rxr == NULL) {
1278 /* Allocate and initialize rx ring. 4kb = 512 entries */
1279 ip->rxr = (unsigned long *) get_free_page(GFP_ATOMIC);
1280 rxr = (unsigned long *) ip->rxr;
1282 printk("ioc3_alloc_rings(): get_free_page() failed!\n");
1284 /* Now the rx buffers. The RX ring may be larger but
1285 we only allocate 16 buffers for now. Need to tune
1286 this for performance and memory later. */
1287 for (i = 0; i < RX_BUFFS; i++) {
1288 struct sk_buff *skb;
1290 skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1296 ip->rx_skbs[i] = skb;
1299 /* Because we reserve afterwards. */
1300 skb_put(skb, (1664 + RX_OFFSET));
1301 rxb = (struct ioc3_erxbuf *) skb->data;
/* Ring entry: 0xa5 tag in the top byte plus the buffer's physical addr. */
1302 rxr[i] = cpu_to_be64((0xa5UL << 56) |
1303 ((unsigned long) rxb & TO_PHYS_MASK));
1304 skb_reserve(skb, RX_OFFSET);
1307 ip->rx_pi = RX_BUFFS;
1310 if (ip->txr == NULL) {
1311 /* Allocate and initialize tx rings. 16kb = 128 bufs. */
1312 ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
1314 printk("ioc3_alloc_rings(): get_free_page() failed!\n");
/* (Re)build both rings from scratch and program their base/consumer/
 * producer registers into the chip. */
1321 ioc3_init_rings(struct net_device *dev, struct ioc3_private *ip,
1326 ioc3_free_rings(ip);
1327 ioc3_alloc_rings(dev, ip, ioc3);
1329 ioc3_clean_rx_ring(ip);
1330 ioc3_clean_tx_ring(ip);
1332 /* Now the rx ring base, consume & produce registers. */
1333 ring = (0xa5UL << 56) | ((unsigned long)ip->rxr & TO_PHYS_MASK);
1334 ioc3->erbr_h = ring >> 32;
1335 ioc3->erbr_l = ring & 0xffffffff;
1336 ioc3->ercir = (ip->rx_ci << 3);
1337 ioc3->erpir = (ip->rx_pi << 3) | ERPIR_ARM;
1339 ring = (0xa5UL << 56) | ((unsigned long)ip->txr & TO_PHYS_MASK);
1341 ip->txqlen = 0; /* nothing queued */
1343 /* Now the tx ring base, consume & produce registers. */
1344 ioc3->etbr_h = ring >> 32;
1345 ioc3->etbr_l = ring & 0xffffffff;
1346 ioc3->etpir = (ip->tx_pi << 7);
1347 ioc3->etcir = (ip->tx_ci << 7);
1348 ioc3->etcir; /* Flush */
/* Discover the attached SSRAM size: write a pattern and its complement at
 * addresses that alias in a 64KB part; if the readback mismatches, fall
 * back to the small-buffer configuration. */
1352 ioc3_ssram_disc(struct ioc3_private *ip)
1354 struct ioc3 *ioc3 = ip->regs;
1355 volatile u32 *ssram0 = &ioc3->ssram[0x0000];
1356 volatile u32 *ssram1 = &ioc3->ssram[0x4000];
1357 unsigned int pattern = 0x5555;
1359 /* Assume the larger size SSRAM and enable parity checking */
1360 ioc3->emcr |= (EMCR_BUFSIZ | EMCR_RAMPAR);
1363 *ssram1 = ~pattern & IOC3_SSRAM_DM;
1365 if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
1366 (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
1367 /* set ssram size to 64 KB */
1368 ip->emcr = EMCR_RAMPAR;
1369 ioc3->emcr &= ~EMCR_BUFSIZ;
1371 ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
/* Full chip (re)initialization: reset the MAC, program timing/address/
 * hash registers, rebuild the rings, then enable DMA and interrupts. */
1375 static void ioc3_init(struct ioc3_private *ip)
1377 struct net_device *dev = ip->dev;
1378 struct ioc3 *ioc3 = ip->regs;
1380 del_timer(&ip->ioc3_timer); /* Kill if running */
1382 ioc3->emcr = EMCR_RST; /* Reset */
1383 ioc3->emcr; /* Flush WB */
1384 udelay(4); /* Give it time ... */
1388 /* Misc registers */
/* Inter-packet gap timing per the 802.3 defaults for this MAC. */
1390 ioc3->etcsr = (17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21;
1391 ioc3->etcdc; /* Clear on read */
1392 ioc3->ercsr = 15; /* RX low watermark */
1393 ioc3->ertr = 0; /* Interrupt immediately */
/* Station address, split little-endian across emar_h/emar_l. */
1394 ioc3->emar_h = (dev->dev_addr[5] << 8) | dev->dev_addr[4];
1395 ioc3->emar_l = (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
1396 (dev->dev_addr[1] << 8) | dev->dev_addr[0];
1397 ioc3->ehar_h = ip->ehar_h;
1398 ioc3->ehar_l = ip->ehar_l;
1399 ioc3->ersr = 42; /* XXX should be random */
1401 ioc3_init_rings(ip->dev, ip, ioc3);
1403 ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
1404 EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN;
1405 ioc3->emcr = ip->emcr;
1406 ioc3->eier = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
1407 EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
1408 EISR_TXEXPLICIT | EISR_TXMEMERR;
/* Quiesce the chip: clear the MAC control register and mask all
 * interrupt sources, with a read-back to flush the writes. */
1412 static inline void ioc3_stop(struct ioc3_private *ip)
1414 struct ioc3 *ioc3 = ip->regs;
1416 ioc3->emcr = 0; /* Shutup */
1417 ioc3->eier = 0; /* Disable interrupts */
1418 ioc3->eier; /* Flush */
/*
 * ioc3_open - netdevice open callback: claim the (shared) IRQ, then
 * start the transmit queue.  NOTE(review): the hardware init between
 * these two steps is elided from this view — confirm ioc3_init() is
 * called here in the full source.
 */
1422 ioc3_open(struct net_device *dev)
1424 struct ioc3_private *ip = dev->priv;
1426 if (request_irq(dev->irq, ioc3_interrupt, SA_SHIRQ, ioc3_str, dev)) {
1427 printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
1436 netif_start_queue(dev);
/*
 * ioc3_close - netdevice stop callback: kill the PHY poll timer, stop
 * the TX queue, release the IRQ and free the RX/TX rings.
 */
1441 ioc3_close(struct net_device *dev)
1443 struct ioc3_private *ip = dev->priv;
1445 del_timer(&ip->ioc3_timer);
1447 netif_stop_queue(dev);
1450 free_irq(dev->irq, dev);
1452 ioc3_free_rings(ip);
1457 * MENET cards have four IOC3 chips, which are attached to two sets of
1458 * PCI slot resources each: the primary connections are on slots
1459 * 0..3 and the secondaries are on slots 4..7.
1461 * All four Ethernets are brought out to connectors; six serial ports
1462 * (a pair from each of the first three IOC3s) are brought out to
1463 * MiniDINs; all other subdevices are left swinging in the wind, leave
1466 static inline int ioc3_is_menet(struct pci_dev *pdev)
1468 struct pci_dev *dev;
1470 return pdev->bus->parent == NULL
1471 && (dev = pci_find_slot(pdev->bus->number, PCI_DEVFN(0, 0)))
1472 && dev->vendor == PCI_VENDOR_ID_SGI
1473 && dev->device == PCI_DEVICE_ID_SGI_IOC3
1474 && (dev = pci_find_slot(pdev->bus->number, PCI_DEVFN(1, 0)))
1475 && dev->vendor == PCI_VENDOR_ID_SGI
1476 && dev->device == PCI_DEVICE_ID_SGI_IOC3
1477 && (dev = pci_find_slot(pdev->bus->number, PCI_DEVFN(2, 0)))
1478 && dev->vendor == PCI_VENDOR_ID_SGI
1479 && dev->device == PCI_DEVICE_ID_SGI_IOC3;
/*
 * ioc3_serial_probe - register the IOC3's two UARTs with the serial
 * core, except on the fourth IOC3 of a MENET board, which has no
 * SuperIO attached and would bus-error on access.
 */
1482 static void inline ioc3_serial_probe(struct pci_dev *pdev,
1485 struct serial_struct req;
1488 * We need to recognize and treat the fourth MENET serial port
1489 * specially: it does not have a SuperIO chip attached to it, so
1490 * attempting to access it will result in bus errors.  We call
1491 * something a MENET if PCI slots 0, 1, 2 and 3 of a master PCI bus
1492 * all have an IOC3 in them.  This is paranoid, but we want to avoid
1493 * blowing up on a shoehorned PCI box that happens to have 4 IOC3
1494 * cards in it, so it's not paranoid enough ...
1496 if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
1499 /* Register to interrupt zero because we share the interrupt with
1500 the serial driver which we don't properly support yet. */
1501 memset(&req, 0, sizeof(req));
1503 req.flags = IOC3_COM_FLAGS;
1504 req.io_type = SERIAL_IO_MEM;
1505 req.iomem_reg_shift = 0;
1506 req.baud_base = IOC3_BAUD;
/* Each IOC3 provides two memory-mapped UARTs; register both. */
1508 req.iomem_base = (unsigned char *) &ioc3->sregs.uarta;
1509 register_serial(&req);
1511 req.iomem_base = (unsigned char *) &ioc3->sregs.uartb;
1512 register_serial(&req);
/*
 * ioc3_probe - PCI probe: allocate the netdevice, map the IOC3 register
 * file (BAR 0), discover the PHY and SSRAM size, wire up the netdevice
 * operations and register the interface.  Several allocation-failure
 * and error-unwind lines are elided from this view.
 */
1515 static int __devinit ioc3_probe(struct pci_dev *pdev,
1516 const struct pci_device_id *ent)
1518 struct net_device *dev = NULL;
1519 struct ioc3_private *ip;
1521 unsigned long ioc3_base, ioc3_size;
1522 u32 vendor, model, rev;
1525 dev = alloc_etherdev(sizeof(struct ioc3_private));
1529 err = pci_request_regions(pdev, "ioc3");
1533 SET_MODULE_OWNER(dev);
1537 dev->irq = pdev->irq;
/* Map the whole IOC3 register file out of BAR 0. */
1539 ioc3_base = pci_resource_start(pdev, 0);
1540 ioc3_size = pci_resource_len(pdev, 0);
1541 ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size);
1543 printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n",
/* The IOC3 also hosts two UARTs; hand them to the serial layer. */
1550 #ifdef CONFIG_SERIAL
1551 ioc3_serial_probe(pdev, ioc3);
1554 spin_lock_init(&ip->ioc3_lock);
1559 init_timer(&ip->ioc3_timer);
/* PHY scan result is stored in ip->phy; -1 means none answered. */
1562 if (ip->phy == -1) {
1563 printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
1569 ioc3_ssram_disc(ip);
1572 /* The IOC3-specific entries in the device structure. */
1573 dev->open = ioc3_open;
1574 dev->hard_start_xmit = ioc3_start_xmit;
1575 dev->tx_timeout = ioc3_timeout;
1576 dev->watchdog_timeo = 5 * HZ;
1577 dev->stop = ioc3_close;
1578 dev->get_stats = ioc3_get_stats;
1579 dev->do_ioctl = ioc3_ioctl;
1580 dev->set_multicast_list = ioc3_set_multicast_list;
1582 err = register_netdev(dev);
/* NOTE(review): this vendor decode differs from the usual MII OUI
   split (vendor = physid1<<6 | physid2>>10) — confirm it is intended;
   it is only used for the banner printk below. */
1586 vendor = (ip->sw_physid1 << 12) | (ip->sw_physid2 >> 4);
1587 model = (ip->sw_physid2 >> 4) & 0x3f;
1588 rev = ip->sw_physid2 & 0xf;
1589 printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, "
1590 "rev %d.\n", dev->name, ip->phy, vendor, model, rev);
1591 printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name,
1592 ip->emcr & EMCR_BUFSIZ ? 128 : 64);
/* Error unwind path (labels elided in this view). */
1598 free_irq(dev->irq, dev);
1599 ioc3_free_rings(ip);
1601 pci_release_regions(pdev);
/*
 * ioc3_remove_one - PCI removal callback: tear down the netdevice and
 * release the PCI regions.  NOTE(review): the unregister_netdev/iounmap
 * calls are not visible in this view — confirm they occur between the
 * lines below in the full source.
 */
1607 static void __devexit ioc3_remove_one (struct pci_dev *pdev)
1609 struct net_device *dev = pci_get_drvdata(pdev);
1610 struct ioc3_private *ip = dev->priv;
1611 struct ioc3 *ioc3 = ip->regs;
1614 pci_release_regions(pdev);
/* PCI IDs this driver binds to: any SGI IOC3, any subsystem. */
1618 static struct pci_device_id ioc3_pci_tbl[] __devinitdata = {
1619 { PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
1622 MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);
/* PCI driver glue (old GNU-style `field:` designated initializers). */
1624 static struct pci_driver ioc3_driver = {
1626 id_table: ioc3_pci_tbl,
1628 remove: __devexit_p(ioc3_remove_one),
1631 static int __init ioc3_init_module(void)
1633 return pci_module_init(&ioc3_driver);
/* ioc3_cleanup_module - module exit: detach the driver from the PCI core. */
1636 static void __exit ioc3_cleanup_module(void)
1638 pci_unregister_driver(&ioc3_driver);
/*
 * ioc3_start_xmit - queue one skb on the 128-entry IOC3 TX ring.
 *
 * Short frames are copied directly into the descriptor's inline data
 * area (and zero-padded up to ETH_ZLEN).  Larger frames are handed to
 * the DMA engine; because a single DMA buffer may not cross a 16 KB
 * boundary, a frame that straddles one is split into two buffers
 * (B1 = up to the boundary, B2 = from the boundary on).
 */
1642 ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1645 struct ioc3_private *ip = dev->priv;
1646 struct ioc3 *ioc3 = ip->regs;
1648 struct ioc3_etxd *desc;
1651 spin_lock_irq(&ip->ioc3_lock);
1653 data = (unsigned long) skb->data;
1656 produce = ip->tx_pi;
1657 desc = &ip->txr[produce];
1660 /* Short packet, let's copy it directly into the ring. */
1661 memcpy(desc->data, skb->data, skb->len);
1662 if (len < ETH_ZLEN) {
1663 /* Very short packet, pad with zeros at the end. */
1664 memset(desc->data + len, 0, ETH_ZLEN - len);
1667 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V);
1668 desc->bufcnt = cpu_to_be32(len);
/* Fix: test the LAST byte of the frame (data + len - 1).  Testing
   data + len falsely flagged frames ending exactly on a 16 KB boundary
   as crossing it, producing a zero-length second buffer. */
1669 } else if ((data ^ (data + len - 1)) & 0x4000) {
1670 unsigned long b2, s1, s2;
/* b2: first byte of the second 16 KB region; s1/s2: the two lengths. */
1672 b2 = (data | 0x3fffUL) + 1UL;
1674 s2 = data + len - b2;
1676 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
1677 ETXD_B1V | ETXD_B2V);
1678 desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT)
1679 | (s2 << ETXD_B2CNT_SHIFT));
1680 desc->p1 = cpu_to_be64((0xa5UL << 56) |
1681 (data & TO_PHYS_MASK));
/* Fix: the second buffer pointer must address the start of the second
   16 KB region (b2), not the start of the frame again — p2 == p1 made
   the chip retransmit the first fragment in place of the second. */
1682 desc->p2 = cpu_to_be64((0xa5UL << 56) |
1683 (b2 & TO_PHYS_MASK));
1685 /* Normal sized packet that doesn't cross a page boundary. */
1686 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V);
1687 desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
1688 desc->p1 = cpu_to_be64((0xa5UL << 56) |
1689 (data & TO_PHYS_MASK));
/* Publish the descriptor, advance the producer index (ring size 128)
   and kick the TX DMA engine by writing the new index to ETPIR. */
1694 dev->trans_start = jiffies;
1695 ip->tx_skbs[produce] = skb; /* Remember skb */
1696 produce = (produce + 1) & 127;
1697 ip->tx_pi = produce;
1698 ioc3->etpir = produce << 7; /* Fire ... */
/* Ring full: stop the queue until TX completion frees entries. */
1702 if (ip->txqlen > 127)
1703 netif_stop_queue(dev);
1705 spin_unlock_irq(&ip->ioc3_lock);
/*
 * ioc3_timeout - TX watchdog callback (fires after watchdog_timeo with
 * no TX progress): log the hang, reset and reinitialize the chip (those
 * calls are elided from this view) and restart the queue.
 */
1710 static void ioc3_timeout(struct net_device *dev)
1712 struct ioc3_private *ip = dev->priv;
1714 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
1720 dev->trans_start = jiffies;
1721 netif_wake_queue(dev);
1725 * Given a multicast ethernet address, this routine calculates the
1726 * address's bit index in the logical address filter mask
1729 static inline unsigned int
1730 ioc3_hash(const unsigned char *addr)
1732 unsigned int temp = 0;
1737 crc = ether_crc_le(ETH_ALEN, addr);
1739 crc &= 0x3f; /* bit reverse lowest 6 bits for hash index */
1740 for (bits = 6; --bits >= 0; ) {
/* NOTE(review): the `temp <<= 1;` / `crc >>= 1;` steps of the
   bit-reversal and the final `return temp;` are not visible in this
   view — confirm against the full source. */
1742 temp |= (crc & 0x1);
1750 /* We provide both the mii-tools and the ethtool ioctls. */
/*
 * ioc3_ioctl - private ioctl handler.
 *
 * Implements the old-style MII ioctls (SIOCGMIIPHY / SIOCGMIIREG /
 * SIOCSMIIREG) by driving the MICR/MIDR management registers directly,
 * and ETHTOOL_GSET / ETHTOOL_SSET via a struct ethtool_cmd copied
 * to/from user space.
 */
1751 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1753 struct ioc3_private *ip = dev->priv;
1754 struct ethtool_cmd *ep_user = (struct ethtool_cmd *) rq->ifr_data;
/* Old MII ioctl convention: data[0]=phy id, data[1]=register,
   data[2]=value in, data[3]=value out, overlaid on ifr_data. */
1755 u16 *data = (u16 *)&rq->ifr_data;
1756 struct ioc3 *ioc3 = ip->regs;
1757 struct ethtool_cmd ecmd;
1760 case SIOCGMIIPHY: /* Get the address of the PHY in use. */
1766 case SIOCGMIIREG: { /* Read a PHY register. */
1767 unsigned int phy = data[0];
1768 unsigned int reg = data[1];
1770 if (phy > 0x1f || reg > 0x1f)
1773 spin_lock_irq(&ip->ioc3_lock);
/* Busy-wait for the MII management interface (no timeout). */
1774 while (ioc3->micr & MICR_BUSY);
1775 ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG;
1776 while (ioc3->micr & MICR_BUSY);
1777 data[3] = (ioc3->midr_r & MIDR_DATA_MASK);
1778 spin_unlock_irq(&ip->ioc3_lock);
1782 case SIOCSMIIREG: /* Write a PHY register. */
1786 if (!capable(CAP_NET_ADMIN))
1789 if (phy > 0x1f || reg > 0x1f)
1792 spin_lock_irq(&ip->ioc3_lock);
1793 while (ioc3->micr & MICR_BUSY);
1794 ioc3->midr_w = data[2];
1795 ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg;
1796 while (ioc3->micr & MICR_BUSY);
1797 spin_unlock_irq(&ip->ioc3_lock);
/* Ethtool path: fetch the command block from user space first. */
1802 if (copy_from_user(&ecmd, ep_user, sizeof(ecmd)))
1805 if (ecmd.cmd == ETHTOOL_GSET) {
1807 (SUPPORTED_10baseT_Half |
1808 SUPPORTED_10baseT_Full |
1809 SUPPORTED_100baseT_Half |
1810 SUPPORTED_100baseT_Full | SUPPORTED_Autoneg |
1811 SUPPORTED_TP | SUPPORTED_MII);
1813 ecmd.port = PORT_TP;
1814 ecmd.transceiver = XCVR_INTERNAL;
1815 ecmd.phy_address = ip->phy;
1817 /* Record PHY settings. */
1818 spin_lock_irq(&ip->ioc3_lock);
1819 ip->sw_bmcr = mii_read(ip, MII_BMCR);
1820 ip->sw_lpa = mii_read(ip, MII_LPA);
1821 spin_unlock_irq(&ip->ioc3_lock);
/* Autoneg enabled: derive speed/duplex from the link partner bits. */
1822 if (ip->sw_bmcr & BMCR_ANENABLE) {
1823 ecmd.autoneg = AUTONEG_ENABLE;
1824 ecmd.speed = (ip->sw_lpa &
1825 (LPA_100HALF | LPA_100FULL)) ?
1826 SPEED_100 : SPEED_10;
1827 if (ecmd.speed == SPEED_100)
1828 ecmd.duplex = (ip->sw_lpa & (LPA_100FULL)) ?
1829 DUPLEX_FULL : DUPLEX_HALF;
1831 ecmd.duplex = (ip->sw_lpa & (LPA_10FULL)) ?
1832 DUPLEX_FULL : DUPLEX_HALF;
/* Autoneg disabled: report the forced BMCR settings instead. */
1834 ecmd.autoneg = AUTONEG_DISABLE;
1835 ecmd.speed = (ip->sw_bmcr & BMCR_SPEED100) ?
1836 SPEED_100 : SPEED_10;
1837 ecmd.duplex = (ip->sw_bmcr & BMCR_FULLDPLX) ?
1838 DUPLEX_FULL : DUPLEX_HALF;
1840 if (copy_to_user(ep_user, &ecmd, sizeof(ecmd)))
1843 } else if (ecmd.cmd == ETHTOOL_SSET) {
1844 if (!capable(CAP_NET_ADMIN))
1847 /* Verify the settings we care about. */
1848 if (ecmd.autoneg != AUTONEG_ENABLE &&
1849 ecmd.autoneg != AUTONEG_DISABLE)
1852 if (ecmd.autoneg == AUTONEG_DISABLE &&
1853 ((ecmd.speed != SPEED_100 &&
1854 ecmd.speed != SPEED_10) ||
1855 (ecmd.duplex != DUPLEX_HALF &&
1856 ecmd.duplex != DUPLEX_FULL)))
1859 /* Ok, do it to it. */
1860 del_timer(&ip->ioc3_timer);
1861 spin_lock_irq(&ip->ioc3_lock);
1862 ioc3_start_auto_negotiation(ip, &ecmd);
1863 spin_unlock_irq(&ip->ioc3_lock);
/*
 * ioc3_set_multicast_list - program promiscuous mode and the 64-bit
 * multicast hash filter (EHAR) from dev->mc_list.  The TX queue is
 * stopped around the register updates to keep other paths out.
 */
1874 static void ioc3_set_multicast_list(struct net_device *dev)
1876 struct dev_mc_list *dmi = dev->mc_list;
1877 struct ioc3_private *ip = dev->priv;
1878 struct ioc3 *ioc3 = ip->regs;
1882 netif_stop_queue(dev); /* Lock out others. */
1884 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1885 /* Unconditionally log net taps. */
1886 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1887 ip->emcr |= EMCR_PROMISC;
1888 ioc3->emcr = ip->emcr;
1891 ip->emcr &= ~EMCR_PROMISC;
1892 ioc3->emcr = ip->emcr; /* Clear promiscuous. */
1895 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
1896 /* Too many for hashing to make sense or we want all
1897 multicast packets anyway, so skip computing all the
1898 hashes and just accept all packets. */
1899 ip->ehar_h = 0xffffffff;
1900 ip->ehar_l = 0xffffffff;
/* Otherwise hash each multicast address into the 64-bit filter. */
1902 for (i = 0; i < dev->mc_count; i++) {
1903 char *addr = dmi->dmi_addr;
1909 ehar |= (1UL << ioc3_hash(addr));
1911 ip->ehar_h = ehar >> 32;
1912 ip->ehar_l = ehar & 0xffffffff;
/* Write the cached hash filter to the chip. */
1914 ioc3->ehar_h = ip->ehar_h;
1915 ioc3->ehar_l = ip->ehar_l;
1918 netif_wake_queue(dev); /* Let us get going again. */
/* Module metadata and entry/exit hookup. */
1921 MODULE_AUTHOR("Ralf Baechle <ralf@oss.sgi.com>");
1922 MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
1923 MODULE_LICENSE("GPL");
1925 module_init(ioc3_init_module);
1926 module_exit(ioc3_cleanup_module);