2 * Network device driver for the BMAC ethernet controller on
3 * Apple Powermacs. Assumes it's under a DBDMA controller.
5 * Copyright (C) 1998 Randy Gobbel.
7 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
8 * dynamic procfs inode.
10 #include <linux/config.h>
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/delay.h>
16 #include <linux/string.h>
17 #include <linux/timer.h>
18 #include <linux/proc_fs.h>
19 #include <linux/init.h>
20 #include <linux/crc32.h>
22 #include <asm/dbdma.h>
25 #include <asm/pgtable.h>
26 #include <asm/machdep.h>
27 #include <asm/pmac_feature.h>
29 #ifdef CONFIG_PMAC_PBOOK
30 #include <linux/adb.h>
31 #include <linux/pmu.h>
32 #endif /* CONFIG_PMAC_PBOOK */
/* Page-rounding helpers: trunc_page rounds an address down to a page
 * boundary, round_page rounds it up.  NOTE(review): this extract is
 * missing interior lines (original numbering skips); comments only added. */
35 #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
36 #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
39 * CRC polynomial - used in working out multicast filter bits.
/* Standard ethernet CRC-32 polynomial (used by crc416/bmac_crc below). */
41 #define ENET_CRCPOLY 0x04c11db7
43 /* switch to use multicast code lifted from sunhme driver */
44 #define SUNHME_MULTICAST
/* At most one transmit DMA command outstanding at a time. */
48 #define MAX_TX_ACTIVE 1
/* Minimum legal ethernet frame size, including CRC. */
50 #define ETHERMINPACKET 64
/* Receive buffer: MTU + 14-byte header + CRC + 2 bytes alignment slack. */
52 #define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2)
53 #define TX_TIMEOUT HZ /* 1 second */
55 /* Bits in transmit DMA status */
56 #define TX_DMA_ERR 0x80
/* Per-device private state (interior of struct bmac_data; the struct
 * opener is outside this extract).  Holds the DBDMA register/command
 * pointers, ring buffers, stats and multicast hash bookkeeping. */
61 /* volatile struct bmac *bmac; */
62 struct sk_buff_head *queue;
63 volatile struct dbdma_regs *tx_dma;
65 volatile struct dbdma_regs *rx_dma;
67 volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
68 volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
/* Open Firmware device node this BMAC was probed from. */
69 struct device_node *node;
71 struct sk_buff *rx_bufs[N_RX_RING];
74 struct sk_buff *tx_bufs[N_TX_RING];
/* Nonzero while the tx ring is full and the queue is stopped. */
77 unsigned char tx_fullup;
78 struct net_device_stats stats;
/* Transmit watchdog timer; armed by bmac_set_timeout(). */
79 struct timer_list tx_timeout;
/* Reference counts per multicast hash bit, so removals only clear a
 * filter bit when no remaining address maps to it. */
83 unsigned short hash_use_count[64];
/* Software copy of the 4x16-bit chip hash filter (BHASH0..BHASH3). */
84 unsigned short hash_table_mask[4];
/* Singly-linked list of all probed BMAC devices. */
85 struct net_device *next_bmac;
/* Name/offset pairs for dumping chip registers via /proc (table is
 * truncated in this extract; N_REG_ENTRIES says 31 entries). */
88 typedef struct bmac_reg_entry {
90 unsigned short reg_offset;
93 #define N_REG_ENTRIES 31
95 static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
97 {"MEMDATAHI", MEMDATAHI},
98 {"MEMDATALO", MEMDATALO},
/* Head of the list of probed devices, and a fallback receive buffer
 * used when an skb cannot be allocated for a ring slot. */
129 static struct net_device *bmac_devs;
130 static unsigned char *bmac_emergency_rxbuf;
132 #ifdef CONFIG_PMAC_PBOOK
/* PMU sleep hook: detach/re-attach the interface across laptop sleep. */
133 static int bmac_sleep_notify(struct pmu_sleep_notifier *self, int when);
134 static struct pmu_sleep_notifier bmac_sleep_notifier = {
135 bmac_sleep_notify, SLEEP_LEVEL_NET,
140 * Number of bytes of private data per BMAC: allow enough for
141 * the rx and tx dma commands plus a branch dma command each,
142 * and another 16 bytes to allow us to align the dma command
143 * buffers on a 16 byte boundary.
145 #define PRIV_BYTES (sizeof(struct bmac_data) \
146 + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
147 + sizeof(struct sk_buff_head))
/* Forward declarations for the driver entry points and helpers below. */
149 static unsigned char bitrev(unsigned char b);
150 static void bmac_probe1(struct device_node *bmac, int is_bmac_plus);
151 static int bmac_open(struct net_device *dev);
152 static int bmac_close(struct net_device *dev);
153 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
154 static struct net_device_stats *bmac_stats(struct net_device *dev);
155 static void bmac_set_multicast(struct net_device *dev);
156 static void bmac_reset_and_enable(struct net_device *dev);
157 static void bmac_start_chip(struct net_device *dev);
158 static void bmac_init_chip(struct net_device *dev);
159 static void bmac_init_registers(struct net_device *dev);
160 static void bmac_enable_and_reset_chip(struct net_device *dev);
161 static int bmac_set_address(struct net_device *dev, void *addr);
162 static void bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs);
163 static void bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs);
164 static void bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs);
165 static void bmac_set_timeout(struct net_device *dev);
166 static void bmac_tx_timeout(unsigned long data);
167 static int bmac_proc_info ( char *buffer, char **start, off_t offset, int length);
168 static int bmac_output(struct sk_buff *skb, struct net_device *dev);
169 static void bmac_start(struct net_device *dev);
/* DBDMA control-register encoding: the high 16 bits are a write mask,
 * the low 16 the new bit values.  SET writes 1s under the mask, CLEAR
 * writes 0s.  NOTE(review): interior lines are missing in this extract. */
171 #define DBDMA_SET(x) ( ((x) | (x) << 16) )
172 #define DBDMA_CLEAR(x) ( (x) << 16)
/* Byte-swapped 32-bit store: DBDMA registers are little-endian, the
 * CPU is big-endian, so use stwbrx (store word byte-reversed). */
175 dbdma_st32(volatile unsigned long *a, unsigned long x)
177 __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
/* Byte-swapped 32-bit load (lwbrx), companion to dbdma_st32. */
181 static inline unsigned long
182 dbdma_ld32(volatile unsigned long *a)
185 __asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a));
/* Restart a (possibly paused) DBDMA channel: set RUN|WAKE, clear PAUSE|DEAD. */
190 dbdma_continue(volatile struct dbdma_regs *dmap)
192 dbdma_st32((volatile unsigned long *)&dmap->control,
193 DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
/* Stop a DBDMA channel and spin until the RUN status bit drops. */
198 dbdma_reset(volatile struct dbdma_regs *dmap)
200 dbdma_st32((volatile unsigned long *)&dmap->control,
201 DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
203 while (dbdma_ld32((volatile unsigned long *)&dmap->status) & RUN)
/* Fill in one DBDMA command descriptor (all fields little-endian);
 * status/residual are zeroed last so the command reads as not-yet-run. */
208 dbdma_setcmd(volatile struct dbdma_cmd *cp,
209 unsigned short cmd, unsigned count, unsigned long addr,
210 unsigned long cmd_dep)
212 out_le16(&cp->command, cmd);
213 out_le16(&cp->req_count, count);
214 out_le32(&cp->phy_addr, addr);
215 out_le32(&cp->cmd_dep, cmd_dep);
216 out_le16(&cp->xfer_status, 0);
217 out_le16(&cp->res_count, 0);
/* Write a 16-bit value to a BMAC register (little-endian MMIO at
 * base_addr + reg_offset). */
221 void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
223 out_le16((void *)dev->base_addr + reg_offset, data);
/* Read a 16-bit BMAC register. */
228 volatile unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
230 return in_le16((void *)dev->base_addr + reg_offset);
/* Power up and reset the BMAC cell via the platform feature call.
 * NOTE(review): the DMA-reset statements between the declarations and
 * the feature call are missing from this extract. */
234 bmac_enable_and_reset_chip(struct net_device *dev)
236 struct bmac_data *bp = (struct bmac_data *) dev->priv;
237 volatile struct dbdma_regs *rd = bp->rx_dma;
238 volatile struct dbdma_regs *td = bp->tx_dma;
245 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 1);
/* MII management-interface (MIF) bit-banging over the MIFCSR register.
 * Bit 0 is the clock, bit 1 data-out, bit 2 output-enable, bit 3 data-in
 * (inferred from usage below — confirm against BMAC docs). */
248 #define MIFDELAY udelay(10)
/* Clock in `nb` bits from the PHY, MSB first; returns them as an int. */
251 bmac_mif_readbits(struct net_device *dev, int nb)
253 unsigned int val = 0;
256 bmwrite(dev, MIFCSR, 0);
258 if (bmread(dev, MIFCSR) & 8)
260 bmwrite(dev, MIFCSR, 1);
263 bmwrite(dev, MIFCSR, 0);
265 bmwrite(dev, MIFCSR, 1);
/* Clock out the low `nb` bits of val to the PHY, MSB first. */
271 bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
276 b = (val & (1 << nb))? 6: 4;
277 bmwrite(dev, MIFCSR, b);
279 bmwrite(dev, MIFCSR, b|1);
/* Read one 16-bit PHY register: 32-bit preamble, read opcode (6),
 * 10-bit phy/reg address, turnaround, then 17 clocked-in bits. */
285 bmac_mif_read(struct net_device *dev, unsigned int addr)
289 bmwrite(dev, MIFCSR, 4);
291 bmac_mif_writebits(dev, ~0U, 32);
292 bmac_mif_writebits(dev, 6, 4);
293 bmac_mif_writebits(dev, addr, 10);
294 bmwrite(dev, MIFCSR, 2);
296 bmwrite(dev, MIFCSR, 1);
298 val = bmac_mif_readbits(dev, 17);
299 bmwrite(dev, MIFCSR, 4);
/* Write one 16-bit PHY register: preamble, write opcode (5), address,
 * turnaround bits (2), 16 data bits, then idle bits. */
305 bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
307 bmwrite(dev, MIFCSR, 4);
309 bmac_mif_writebits(dev, ~0U, 32);
310 bmac_mif_writebits(dev, 5, 4);
311 bmac_mif_writebits(dev, addr, 10);
312 bmac_mif_writebits(dev, 2, 2);
313 bmac_mif_writebits(dev, val, 16);
314 bmac_mif_writebits(dev, 3, 2);
/* Program the BMAC register file to a known-good state: reset rx/tx,
 * configure the transceiver interface, clear counters, enable the
 * FIFOs, zero the hash filter, load the MAC address and rx config. */
318 bmac_init_registers(struct net_device *dev)
320 struct bmac_data *bp = (struct bmac_data *) dev->priv;
321 volatile unsigned short regValue;
322 unsigned short *pWord16;
325 /* XXDEBUG(("bmac: enter init_registers\n")); */
327 bmwrite(dev, RXRST, RxResetValue);
328 bmwrite(dev, TXRST, TxResetBit);
/* Poll until the chip clears the tx-reset bit (bounded by counter i,
 * whose initialization is outside this extract). */
334 regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
335 } while ((regValue & TxResetBit) && i > 0);
/* Plain BMAC (not bmac+) needs explicit serial-mode transceiver setup. */
337 if (!bp->is_bmac_plus) {
338 regValue = bmread(dev, XCVRIF);
339 regValue |= ClkBit | SerialMode | COLActiveLow;
340 bmwrite(dev, XCVRIF, regValue);
/* Seed for the backoff random number generator. */
344 bmwrite(dev, RSEED, (unsigned short)0x1968);
346 regValue = bmread(dev, XIFC);
347 regValue |= TxOutputEnable;
348 bmwrite(dev, XIFC, regValue);
352 /* set collision counters to 0 */
353 bmwrite(dev, NCCNT, 0);
354 bmwrite(dev, NTCNT, 0);
355 bmwrite(dev, EXCNT, 0);
356 bmwrite(dev, LTCNT, 0);
358 /* set rx counters to 0 */
359 bmwrite(dev, FRCNT, 0);
360 bmwrite(dev, LECNT, 0);
361 bmwrite(dev, AECNT, 0);
362 bmwrite(dev, FECNT, 0);
363 bmwrite(dev, RXCV, 0);
365 /* set tx fifo information */
366 bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */
368 bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */
369 bmwrite(dev, TXFIFOCSR, TxFIFOEnable );
371 /* set rx fifo information */
372 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
373 bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
375 //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */
376 bmread(dev, STATUS); /* read it just to clear it */
378 /* zero out the chip Hash Filter registers */
379 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
380 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
381 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
382 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
383 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
/* Load the station address as three 16-bit words. */
385 pWord16 = (unsigned short *)dev->dev_addr;
386 bmwrite(dev, MADD0, *pWord16++);
387 bmwrite(dev, MADD1, *pWord16++);
388 bmwrite(dev, MADD2, *pWord16);
390 bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
392 bmwrite(dev, INTDISABLE, EnableNormal);
/* Mask all chip interrupts. */
399 bmac_disable_interrupts(struct net_device *dev)
401 bmwrite(dev, INTDISABLE, DisableAll);
/* Unmask the normal set of chip interrupts. */
405 bmac_enable_interrupts(struct net_device *dev)
407 bmwrite(dev, INTDISABLE, EnableNormal);
/* Kick the receive DMA channel and turn on the MAC's tx and rx enables,
 * preserving any other config bits already set (e.g. promiscuous). */
413 bmac_start_chip(struct net_device *dev)
415 struct bmac_data *bp = (struct bmac_data *) dev->priv;
416 volatile struct dbdma_regs *rd = bp->rx_dma;
417 unsigned short oldConfig;
419 /* enable rx dma channel */
422 oldConfig = bmread(dev, TXCFG);
423 bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
425 /* turn on rx plus any other bits already on (promiscuous possibly) */
426 oldConfig = bmread(dev, RXCFG);
427 bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
/* Dump the PHY registers and, on bmac+, set up autonegotiation
 * advertisement (reg 4) and restart autoneg via control reg 0. */
432 bmac_init_phy(struct net_device *dev)
435 struct bmac_data *bp = (struct bmac_data *) dev->priv;
437 printk(KERN_DEBUG "phy registers:");
438 for (addr = 0; addr < 32; ++addr) {
440 printk("\n" KERN_DEBUG);
441 printk(" %.4x", bmac_mif_read(dev, addr));
444 if (bp->is_bmac_plus) {
445 unsigned int capable, ctrl;
447 ctrl = bmac_mif_read(dev, 0);
448 capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
449 if (bmac_mif_read(dev, 4) != capable
450 || (ctrl & 0x1000) == 0) {
451 bmac_mif_write(dev, 4, capable);
452 bmac_mif_write(dev, 0, 0x1200);
454 bmac_mif_write(dev, 0, 0x1000);
/* Full chip init: PHY then register file (body partly outside extract). */
459 bmac_init_chip(struct net_device *dev)
462 bmac_init_registers(dev);
465 #ifdef CONFIG_PMAC_PBOOK
/* PMU sleep notifier: on PBOOK_SLEEP_NOW quiesce the device (stop the
 * watchdog, mask IRQs, disable MAC and DMA, free ring skbs, power the
 * cell off); on wake re-enable everything and re-attach the interface.
 * Only the single device in bmac_devs is handled. */
467 bmac_sleep_notify(struct pmu_sleep_notifier *self, int when)
469 struct bmac_data *bp;
471 unsigned short config;
472 struct net_device* dev = bmac_devs;
476 return PBOOK_SLEEP_OK;
478 bp = (struct bmac_data *) dev->priv;
481 case PBOOK_SLEEP_REQUEST:
483 case PBOOK_SLEEP_REJECT:
485 case PBOOK_SLEEP_NOW:
486 netif_device_detach(dev);
487 /* prolly should wait for dma to finish & turn off the chip */
488 save_flags(flags); cli();
489 if (bp->timeout_active) {
490 del_timer(&bp->tx_timeout);
491 bp->timeout_active = 0;
493 disable_irq(dev->irq);
494 disable_irq(bp->tx_dma_intr);
495 disable_irq(bp->rx_dma_intr);
497 restore_flags(flags);
499 volatile struct dbdma_regs *rd = bp->rx_dma;
500 volatile struct dbdma_regs *td = bp->tx_dma;
502 config = bmread(dev, RXCFG);
503 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
504 config = bmread(dev, TXCFG);
505 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
506 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
507 /* disable rx and tx dma */
508 st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
509 st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
510 /* free some skb's */
511 for (i=0; i<N_RX_RING; i++) {
512 if (bp->rx_bufs[i] != NULL) {
513 dev_kfree_skb(bp->rx_bufs[i]);
514 bp->rx_bufs[i] = NULL;
517 for (i = 0; i<N_TX_RING; i++) {
518 if (bp->tx_bufs[i] != NULL) {
519 dev_kfree_skb(bp->tx_bufs[i]);
520 bp->tx_bufs[i] = NULL;
/* Power the BMAC cell off while asleep. */
524 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);
527 /* see if this is enough */
/* Wake path: full reset/re-init, then re-enable interrupts. */
529 bmac_reset_and_enable(dev);
530 enable_irq(dev->irq);
531 enable_irq(bp->tx_dma_intr);
532 enable_irq(bp->rx_dma_intr);
533 netif_device_attach(dev);
536 return PBOOK_SLEEP_OK;
/* set_mac_address hook: copy the new address into dev_addr and load it
 * into the MADD0..2 registers, with interrupts disabled. */
540 static int bmac_set_address(struct net_device *dev, void *addr)
542 unsigned char *p = addr;
543 unsigned short *pWord16;
547 XXDEBUG(("bmac: enter set_address\n"));
548 save_flags(flags); cli();
550 for (i = 0; i < 6; ++i) {
551 dev->dev_addr[i] = p[i];
553 /* load up the hardware address */
554 pWord16 = (unsigned short *)dev->dev_addr;
555 bmwrite(dev, MADD0, *pWord16++);
556 bmwrite(dev, MADD1, *pWord16++);
557 bmwrite(dev, MADD2, *pWord16);
559 restore_flags(flags);
560 XXDEBUG(("bmac: exit set_address\n"));
/* (Re)arm the transmit watchdog timer one TX_TIMEOUT in the future,
 * cancelling any previously pending instance first. */
564 static inline void bmac_set_timeout(struct net_device *dev)
566 struct bmac_data *bp = (struct bmac_data *) dev->priv;
571 if (bp->timeout_active)
572 del_timer(&bp->tx_timeout);
573 bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
574 bp->tx_timeout.function = bmac_tx_timeout;
575 bp->tx_timeout.data = (unsigned long) dev;
576 add_timer(&bp->tx_timeout);
577 bp->timeout_active = 1;
578 restore_flags(flags);
/* Build the DBDMA output command describing one tx skb. */
582 bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
590 baddr = virt_to_bus(vaddr);
592 dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
/* Build the DBDMA input command for one rx slot; falls back to the
 * shared emergency buffer when no skb is available. */
596 bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
598 unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
600 dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
601 virt_to_bus(addr), 0);
604 /* Bit-reverse one byte of an ethernet hardware address. */
606 bitrev(unsigned char b)
610 for (i = 0; i < 8; ++i, b >>= 1)
611 d = (d << 1) | (b & 1);
/* Reset the transmit ring: zero the command list, append a branch
 * command that loops back to the start, and point the tx DBDMA channel
 * at it (wait_sel selects the channel-wait condition used by WAIT_IFCLR). */
617 bmac_init_tx_ring(struct bmac_data *bp)
619 volatile struct dbdma_regs *td = bp->tx_dma;
621 memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));
627 /* put a branch at the end of the tx command list */
628 dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
629 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));
633 out_le32(&td->wait_sel, 0x00200020);
634 out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
/* Reset the receive ring: allocate an skb per slot (keeping any that
 * already exist), build the input commands, close the ring with a
 * branch, and point the rx DBDMA channel at it. */
638 bmac_init_rx_ring(struct bmac_data *bp)
640 volatile struct dbdma_regs *rd = bp->rx_dma;
644 /* initialize list of sk_buffs for receiving and set up recv dma */
645 memset((char *)bp->rx_cmds, 0,
646 (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
647 for (i = 0; i < N_RX_RING; i++) {
648 if ((skb = bp->rx_bufs[i]) == NULL) {
649 bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
653 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
659 /* Put a branch back to the beginning of the receive command list */
660 dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
661 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));
665 out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));
/* Queue one skb on the tx ring.  Returns -1 (and stops the queue) when
 * the ring is full; otherwise installs a STOP guard, builds the output
 * command, records the skb, and accounts the bytes. */
671 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
673 struct bmac_data *bp = (struct bmac_data *) dev->priv;
674 volatile struct dbdma_regs *td = bp->tx_dma;
677 /* see if there's a free slot in the tx ring */
678 /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
679 /* bp->tx_empty, bp->tx_fill)); */
683 if (i == bp->tx_empty) {
684 netif_stop_queue(dev);
686 XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
687 return -1; /* can't take it at the moment */
/* STOP command at the next slot keeps the channel from running past
 * the newly filled descriptor. */
690 dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);
692 bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);
694 bp->tx_bufs[bp->tx_fill] = skb;
697 bp->stats.tx_bytes += skb->len;
/* Debug counter: only the first few rx interrupts print trace output. */
704 static int rxintcount;
/* Receive DMA interrupt: walk completed ring slots, hand good frames
 * to the stack, refill each slot with a fresh skb, and rearm the
 * descriptor by zeroing its status/residual fields. */
706 static void bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs)
708 struct net_device *dev = (struct net_device *) dev_id;
709 struct bmac_data *bp = (struct bmac_data *) dev->priv;
710 volatile struct dbdma_regs *rd = bp->rx_dma;
711 volatile struct dbdma_cmd *cp;
714 unsigned int residual;
718 save_flags(flags); cli();
720 if (++rxintcount < 10) {
721 XXDEBUG(("bmac_rxdma_intr\n"));
728 cp = &bp->rx_cmds[i];
729 stat = ld_le16(&cp->xfer_status);
730 residual = ld_le16(&cp->res_count);
/* ACTIVE not set => DMA hasn't completed this slot yet; stop here. */
731 if ((stat & ACTIVE) == 0)
/* Frame length = buffer size minus DMA residual minus 2 alignment bytes. */
733 nb = RX_BUFLEN - residual - 2;
734 if (nb < (ETHERMINPACKET - ETHERCRC)) {
736 bp->stats.rx_length_errors++;
737 bp->stats.rx_errors++;
739 skb = bp->rx_bufs[i];
740 bp->rx_bufs[i] = NULL;
746 skb->protocol = eth_type_trans(skb, dev);
748 dev->last_rx = jiffies;
749 ++bp->stats.rx_packets;
750 bp->stats.rx_bytes += nb;
752 ++bp->stats.rx_dropped;
754 dev->last_rx = jiffies;
/* Refill the slot; if allocation fails the slot falls back to the
 * emergency buffer inside bmac_construct_rxbuff. */
755 if ((skb = bp->rx_bufs[i]) == NULL) {
756 bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
758 skb_reserve(bp->rx_bufs[i], 2);
760 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
761 st_le16(&cp->res_count, 0);
762 st_le16(&cp->xfer_status, 0);
764 if (++i >= N_RX_RING) i = 0;
772 restore_flags(flags);
776 if (rxintcount < 10) {
777 XXDEBUG(("bmac_rxdma_intr done\n"));
781 static int txintcount;
/* Transmit DMA interrupt: reap completed tx descriptors, free their
 * skbs, wake the queue, and advance tx_empty until it meets tx_fill. */
783 static void bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs)
785 struct net_device *dev = (struct net_device *) dev_id;
786 struct bmac_data *bp = (struct bmac_data *) dev->priv;
787 volatile struct dbdma_cmd *cp;
791 save_flags(flags); cli();
793 if (txintcount++ < 10) {
794 XXDEBUG(("bmac_txdma_intr\n"));
797 /* del_timer(&bp->tx_timeout); */
798 /* bp->timeout_active = 0; */
801 cp = &bp->tx_cmds[bp->tx_empty];
802 stat = ld_le16(&cp->xfer_status);
803 if (txintcount < 10) {
804 XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
806 if (!(stat & ACTIVE)) {
808 * status field might not have been filled by DBDMA
/* Cross-check against the channel's command pointer in case the
 * status write hasn't landed yet. */
810 if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
814 if (bp->tx_bufs[bp->tx_empty]) {
815 ++bp->stats.tx_packets;
816 dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
818 bp->tx_bufs[bp->tx_empty] = NULL;
820 netif_wake_queue(dev);
821 if (++bp->tx_empty >= N_TX_RING)
823 if (bp->tx_empty == bp->tx_fill)
827 restore_flags(flags);
829 if (txintcount < 10) {
830 XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
/* get_stats hook: returns the stats block kept in the private data. */
836 static struct net_device_stats *bmac_stats(struct net_device *dev)
838 struct bmac_data *p = (struct bmac_data *) dev->priv;
843 #ifndef SUNHME_MULTICAST
844 /* Real fast bit-reversal algorithm, 6-bit values */
845 static int reverse6[64] = {
846 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
847 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
848 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
849 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
850 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
851 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
852 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
853 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
/* Fold one 16-bit word into a running CRC-32 (ENET_CRCPOLY), bit by
 * bit, after byte-swapping the input word. */
857 crc416(unsigned int curval, unsigned short nxtval)
859 register unsigned int counter, cur = curval, next = nxtval;
860 register int high_crc_set, low_data_set;
863 next = ((next & 0x00FF) << 8) | (next >> 8);
865 /* Compute bit-by-bit */
866 for (counter = 0; counter < 16; ++counter) {
867 /* is high CRC bit set? */
868 if ((cur & 0x80000000) == 0) high_crc_set = 0;
869 else high_crc_set = 1;
873 if ((next & 0x0001) == 0) low_data_set = 0;
874 else low_data_set = 1;
879 if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
/* CRC of a 6-byte MAC address, seeded with 0xffffffff, taken as three
 * 16-bit words. */
885 bmac_crc(unsigned short *address)
889 XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
890 newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
891 newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
892 newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
898 * Add requested mcast addr to BMac's hash table filter.
/* Reference-counted: the filter bit is only set the first time an
 * address hashing to it is added. */
903 bmac_addhash(struct bmac_data *bp, unsigned char *addr)
908 if (!(*addr)) return;
909 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
910 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
911 if (bp->hash_use_count[crc]++) return; /* This bit is already set */
913 mask = (unsigned char)1 << mask;
914 bp->hash_use_count[crc/16] |= mask;
/* Companion to bmac_addhash: only clears the filter bit once the last
 * address hashing to it has been removed. */
918 bmac_removehash(struct bmac_data *bp, unsigned char *addr)
923 /* Now, delete the address from the filter copy, as indicated */
924 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
925 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
926 if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
927 if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
929 mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
930 bp->hash_table_mask[crc/16] &= mask;
934 * Sync the adapter with the software copy of the multicast mask
935 * (logical address filter).
/* Disable the receiver and wait for the enable bit to read back clear. */
939 bmac_rx_off(struct net_device *dev)
941 unsigned short rx_cfg;
943 rx_cfg = bmread(dev, RXCFG);
944 rx_cfg &= ~RxMACEnable;
945 bmwrite(dev, RXCFG, rx_cfg);
947 rx_cfg = bmread(dev, RXCFG);
948 } while (rx_cfg & RxMACEnable);
/* Enable the receiver with the requested hash-filter and promiscuous
 * settings; resets rx and the rx FIFO before applying the new config. */
952 bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
954 unsigned short rx_cfg;
956 rx_cfg = bmread(dev, RXCFG);
957 rx_cfg |= RxMACEnable;
958 if (hash_enable) rx_cfg |= RxHashFilterEnable;
959 else rx_cfg &= ~RxHashFilterEnable;
960 if (promisc_enable) rx_cfg |= RxPromiscEnable;
961 else rx_cfg &= ~RxPromiscEnable;
962 bmwrite(dev, RXRST, RxResetValue);
963 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
964 bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
965 bmwrite(dev, RXCFG, rx_cfg );
/* Push the software hash-filter copy into the chip's BHASH registers. */
970 bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
972 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
973 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
974 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
975 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
/* Add one multicast address: update the software filter, write it to
 * the chip, and re-enable rx with hashing on. */
980 bmac_add_multi(struct net_device *dev,
981 struct bmac_data *bp, unsigned char *addr)
983 /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
984 bmac_addhash(bp, addr);
986 bmac_update_hash_table_mask(dev, bp);
987 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
988 /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
/* Remove one multicast address; mirror of bmac_add_multi. */
992 bmac_remove_multi(struct net_device *dev,
993 struct bmac_data *bp, unsigned char *addr)
995 bmac_removehash(bp, addr);
997 bmac_update_hash_table_mask(dev, bp);
998 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
1002 /* Set or clear the multicast filter for this adaptor.
1003 num_addrs == -1 Promiscuous mode, receive all packets
1004 num_addrs == 0 Normal mode, clear multicast list
1005 num_addrs > 0 Multicast mode, receive normal and MC packets, and do
1006 best-effort filtering.
/* Original (non-sunhme) multicast setup: compiled out because
 * SUNHME_MULTICAST is defined above. */
1008 static void bmac_set_multicast(struct net_device *dev)
1010 struct dev_mc_list *dmi;
1011 struct bmac_data *bp = (struct bmac_data *) dev->priv;
1012 int num_addrs = dev->mc_count;
1013 unsigned short rx_cfg;
1019 XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
1021 if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
1022 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
1023 bmac_update_hash_table_mask(dev, bp);
1024 rx_cfg = bmac_rx_on(dev, 1, 0);
1025 XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n"));
1026 } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
1027 rx_cfg = bmread(dev, RXCFG);
1028 rx_cfg |= RxPromiscEnable;
1029 bmwrite(dev, RXCFG, rx_cfg);
1030 rx_cfg = bmac_rx_on(dev, 0, 1);
1031 XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
1033 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
1034 for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
1035 if (num_addrs == 0) {
1036 rx_cfg = bmac_rx_on(dev, 0, 0);
1037 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
1039 for (dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next)
1040 bmac_addhash(bp, dmi->dmi_addr);
1041 bmac_update_hash_table_mask(dev, bp);
1042 rx_cfg = bmac_rx_on(dev, 1, 0);
1043 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
1046 /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
1048 #else /* ifdef SUNHME_MULTICAST */
1050 /* The version of set_multicast below was lifted from sunhme.c */
/* Active multicast implementation: all-1s hash for ALLMULTI/overflow,
 * promiscuous bit for IFF_PROMISC, otherwise a 64-bit hash built with
 * the standard little-endian ethernet CRC (ether_crc_le). */
1052 static void bmac_set_multicast(struct net_device *dev)
1054 struct dev_mc_list *dmi = dev->mc_list;
1057 unsigned short rx_cfg;
1060 if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
1061 bmwrite(dev, BHASH0, 0xffff);
1062 bmwrite(dev, BHASH1, 0xffff);
1063 bmwrite(dev, BHASH2, 0xffff);
1064 bmwrite(dev, BHASH3, 0xffff);
1065 } else if(dev->flags & IFF_PROMISC) {
1066 rx_cfg = bmread(dev, RXCFG);
1067 rx_cfg |= RxPromiscEnable;
1068 bmwrite(dev, RXCFG, rx_cfg);
1072 rx_cfg = bmread(dev, RXCFG);
1073 rx_cfg &= ~RxPromiscEnable;
1074 bmwrite(dev, RXCFG, rx_cfg);
1076 for(i = 0; i < 4; i++) hash_table[i] = 0;
1078 for(i = 0; i < dev->mc_count; i++) {
1079 addrs = dmi->dmi_addr;
1085 crc = ether_crc_le(6, addrs);
/* Top 6 CRC bits select one of 64 filter bits (4 words x 16 bits).
 * NOTE(review): the shift that moves crc into range is on a line
 * missing from this extract. */
1087 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1089 bmwrite(dev, BHASH0, hash_table[0]);
1090 bmwrite(dev, BHASH1, hash_table[1]);
1091 bmwrite(dev, BHASH2, hash_table[2]);
1092 bmwrite(dev, BHASH3, hash_table[3]);
1095 #endif /* SUNHME_MULTICAST */
1097 static int miscintcount;
/* Chip (non-DMA) interrupt: read STATUS once and translate the error
 * bits into the corresponding net_device_stats counters. */
1099 static void bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs)
1101 struct net_device *dev = (struct net_device *) dev_id;
1102 struct bmac_data *bp = (struct bmac_data *)dev->priv;
1103 unsigned int status = bmread(dev, STATUS);
1104 if (miscintcount++ < 10) {
1105 XXDEBUG(("bmac_misc_intr\n"));
1107 /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
1108 /* bmac_txdma_intr_inner(irq, dev_id, regs); */
1109 /* if (status & FrameReceived) bp->stats.rx_dropped++; */
1110 if (status & RxErrorMask) bp->stats.rx_errors++;
1111 if (status & RxCRCCntExp) bp->stats.rx_crc_errors++;
1112 if (status & RxLenCntExp) bp->stats.rx_length_errors++;
1113 if (status & RxOverFlow) bp->stats.rx_over_errors++;
1114 if (status & RxAlignCntExp) bp->stats.rx_frame_errors++;
1116 /* if (status & FrameSent) bp->stats.tx_dropped++; */
1117 if (status & TxErrorMask) bp->stats.tx_errors++;
1118 if (status & TxUnderrun) bp->stats.tx_fifo_errors++;
1119 if (status & TxNormalCollExp) bp->stats.collisions++;
1123 * Procedure for reading EEPROM
/* SROM (serial EEPROM) bit-bang constants: SROMCSR bit 0 is chip
 * select, SDI/SDO shift counts give the data-in/data-out bit positions. */
1125 #define SROMAddressLength 5
1126 #define DataInOn 0x0008
1127 #define DataInOff 0x0000
1129 #define ChipSelect 0x0001
1130 #define SDIShiftCount 3
1131 #define SD0ShiftCount 2
1132 #define DelayValue 1000 /* number of microseconds */
1133 #define SROMStartOffset 10 /* this is in words */
1134 #define SROMReadCount 3 /* number of words to read from SROM */
1135 #define SROMAddressBits 6
1136 #define EnetAddressOffset 20
/* Clock one data bit out of the SROM: raise the clock with chip select
 * held, sample SD0, then drop the clock. */
1138 static unsigned char
1139 bmac_clock_out_bit(struct net_device *dev)
1141 unsigned short data;
1144 bmwrite(dev, SROMCSR, ChipSelect | Clk);
1147 data = bmread(dev, SROMCSR);
1149 val = (data >> SD0ShiftCount) & 1;
1151 bmwrite(dev, SROMCSR, ChipSelect);
/* Clock one data bit (0 or 1) into the SROM on the SDI line. */
1158 bmac_clock_in_bit(struct net_device *dev, unsigned int val)
1160 unsigned short data;
1162 if (val != 0 && val != 1) return;
1164 data = (val << SDIShiftCount);
1165 bmwrite(dev, SROMCSR, data | ChipSelect );
1168 bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
1171 bmwrite(dev, SROMCSR, data | ChipSelect);
/* Deselect the SROM, then clock in the READ opcode (110). */
1176 reset_and_select_srom(struct net_device *dev)
1179 bmwrite(dev, SROMCSR, 0);
1182 /* send it the read command (110) */
1183 bmac_clock_in_bit(dev, 1);
1184 bmac_clock_in_bit(dev, 1);
1185 bmac_clock_in_bit(dev, 0);
/* Read one 16-bit word from the SROM: shift out the address MSB-first,
 * then clock in 16 data bits; deselect when done. */
1188 static unsigned short
1189 read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
1191 unsigned short data, val;
1194 /* send out the address we want to read from */
1195 for (i = 0; i < addr_len; i++) {
1196 val = addr >> (addr_len-i-1);
1197 bmac_clock_in_bit(dev, val & 1);
1200 /* Now read in the 16-bit data */
1202 for (i = 0; i < 16; i++) {
1203 val = bmac_clock_out_bit(dev);
1207 bmwrite(dev, SROMCSR, 0);
1213 * It looks like Cogent and SMC use different methods for calculating
1214 * checksums. What a pain..
/* Read the stored checksum word (byte-swapped) from SROM address 3.
 * The comparison against a computed checksum is outside this extract. */
1218 bmac_verify_checksum(struct net_device *dev)
1220 unsigned short data, storedCS;
1222 reset_and_select_srom(dev);
1223 data = read_srom(dev, 3, SROMAddressBits);
1224 storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);
/* Read the 6-byte station address from SROM; each stored byte is
 * bit-reversed, hence the bitrev() on both halves of each word. */
1231 bmac_get_station_address(struct net_device *dev, unsigned char *ea)
1234 unsigned short data;
1236 for (i = 0; i < 6; i++)
1238 reset_and_select_srom(dev);
1239 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
1240 ea[2*i] = bitrev(data & 0x0ff);
1241 ea[2*i+1] = bitrev((data >> 8) & 0x0ff);
/* Full reset path: power up the chip, rebuild both DMA rings, program
 * the registers, start the MAC, then transmit a dummy self-addressed
 * frame (the chip won't receive until it has transmitted once). */
1245 static void bmac_reset_and_enable(struct net_device *dev)
1247 struct bmac_data *bp = dev->priv;
1248 unsigned long flags;
1249 struct sk_buff *skb;
1250 unsigned char *data;
1252 save_flags(flags); cli();
1253 bmac_enable_and_reset_chip(dev);
1254 bmac_init_tx_ring(bp);
1255 bmac_init_rx_ring(bp);
1256 bmac_init_chip(dev);
1257 bmac_start_chip(dev);
1258 bmwrite(dev, INTDISABLE, EnableNormal);
1262 * It seems that the bmac can't receive until it's transmitted
1263 * a packet. So we give it a dummy packet to transmit.
1265 skb = dev_alloc_skb(ETHERMINPACKET);
1267 data = skb_put(skb, ETHERMINPACKET);
1268 memset(data, 0, ETHERMINPACKET);
/* Dummy frame: our own address as both destination and source. */
1269 memcpy(data, dev->dev_addr, 6);
1270 memcpy(data+6, dev->dev_addr, 6);
1271 bmac_transmit_packet(skb, dev);
1273 restore_flags(flags);
/* Module probe: find all "bmac" and bmac+ compatible OF nodes, probe
 * each, and register the /proc entry and sleep notifier if any found. */
1276 static int __init bmac_probe(void)
1278 struct device_node *bmac;
1282 for (bmac = find_devices("bmac"); bmac != 0; bmac = bmac->next)
1283 bmac_probe1(bmac, 0);
1284 for (bmac = find_compatible_devices("network", "bmac+"); bmac != 0;
1286 bmac_probe1(bmac, 1);
1288 if (bmac_devs != 0) {
1289 proc_net_create ("bmac", 0, bmac_proc_info);
1290 #ifdef CONFIG_PMAC_PBOOK
1291 pmu_register_sleep_notifier(&bmac_sleep_notifier);
1297 return bmac_devs? 0: -ENODEV;
1300 static void __init bmac_probe1(struct device_node *bmac, int is_bmac_plus)
1303 struct bmac_data *bp;
1304 unsigned char *addr;
1305 struct net_device *dev;
1307 if (bmac->n_addrs != 3 || bmac->n_intrs != 3) {
1308 printk(KERN_ERR "can't use BMAC %s: need 3 addrs and 3 intrs\n",
1312 addr = get_property(bmac, "mac-address", NULL);
1314 addr = get_property(bmac, "local-mac-address", NULL);
1316 printk(KERN_ERR "Can't get mac-address for BMAC %s\n",
1322 if (bmac_emergency_rxbuf == NULL) {
1323 bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
1324 if (bmac_emergency_rxbuf == NULL) {
1325 printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
1330 dev = init_etherdev(NULL, PRIV_BYTES);
1332 printk(KERN_ERR "init_etherdev failed, out of memory for BMAC %s\n",
1336 bp = (struct bmac_data *) dev->priv;
1337 SET_MODULE_OWNER(dev);
1340 if (!request_OF_resource(bmac, 0, " (bmac)")) {
1341 printk(KERN_ERR "BMAC: can't request IO resource !\n");
1344 if (!request_OF_resource(bmac, 1, " (bmac tx dma)")) {
1345 printk(KERN_ERR "BMAC: can't request TX DMA resource !\n");
1349 if (!request_OF_resource(bmac, 2, " (bmac rx dma)")) {
1350 printk(KERN_ERR "BMAC: can't request RX DMA resource !\n");
1353 dev->base_addr = (unsigned long)
1354 ioremap(bmac->addrs[0].address, bmac->addrs[0].size);
1355 if (!dev->base_addr)
1357 dev->irq = bmac->intrs[0].line;
1359 bmac_enable_and_reset_chip(dev);
1360 bmwrite(dev, INTDISABLE, DisableAll);
1362 printk(KERN_INFO "%s: BMAC%s at", dev->name, (is_bmac_plus? "+": ""));
1363 rev = addr[0] == 0 && addr[1] == 0xA0;
1364 for (j = 0; j < 6; ++j) {
1365 dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
1366 printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
1368 XXDEBUG((", base_addr=%#0lx", dev->base_addr));
1371 /* Enable chip without interrupts for now */
1372 bmac_enable_and_reset_chip(dev);
1373 bmwrite(dev, INTDISABLE, DisableAll);
1375 dev->open = bmac_open;
1376 dev->stop = bmac_close;
1377 dev->hard_start_xmit = bmac_output;
1378 dev->get_stats = bmac_stats;
1379 dev->set_multicast_list = bmac_set_multicast;
1380 dev->set_mac_address = bmac_set_address;
1382 bmac_get_station_address(dev, addr);
1383 if (bmac_verify_checksum(dev) != 0)
1384 goto err_out_iounmap;
1386 bp->is_bmac_plus = is_bmac_plus;
1387 bp->tx_dma = (volatile struct dbdma_regs *)
1388 ioremap(bmac->addrs[1].address, bmac->addrs[1].size);
1390 goto err_out_iounmap;
1391 bp->tx_dma_intr = bmac->intrs[1].line;
1392 bp->rx_dma = (volatile struct dbdma_regs *)
1393 ioremap(bmac->addrs[2].address, bmac->addrs[2].size);
1395 goto err_out_iounmap_tx;
1396 bp->rx_dma_intr = bmac->intrs[2].line;
1398 bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
1399 bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
1401 bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
1402 skb_queue_head_init(bp->queue);
1404 memset((char *) bp->tx_cmds, 0,
1405 (N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
1406 init_timer(&bp->tx_timeout);
1407 /* bp->timeout_active = 0; */
1409 ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
1411 printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
1412 goto err_out_iounmap_rx;
1414 ret = request_irq(bmac->intrs[1].line, bmac_txdma_intr, 0, "BMAC-txdma", dev);
1416 printk(KERN_ERR "BMAC: can't get irq %d\n", bmac->intrs[1].line);
1419 ret = request_irq(bmac->intrs[2].line, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
1421 printk(KERN_ERR "BMAC: can't get irq %d\n", bmac->intrs[2].line);
1425 /* Mask chip interrupts and disable chip, will be
1426 * re-enabled on open()
1428 disable_irq(dev->irq);
1429 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);
1431 bp->next_bmac = bmac_devs;
1436 free_irq(bmac->intrs[1].line, dev);
1438 free_irq(dev->irq, dev);
1440 iounmap((void *)bp->rx_dma);
1442 iounmap((void *)bp->tx_dma);
1444 iounmap((void *)dev->base_addr);
1447 release_OF_resource(bp->node, 0);
1448 release_OF_resource(bp->node, 1);
1449 release_OF_resource(bp->node, 2);
1450 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);
1452 unregister_netdev(dev);
/*
 * bmac_open: net_device open() method.
 * Resets and re-enables the BMAC chip, unmasks the misc interrupt line
 * (left disabled at probe/close time) and marks the interface running.
 * NOTE(review): the function tail (return statement / closing brace) is
 * elided from this chunk.
 */
1456 static int bmac_open(struct net_device *dev)
1458 struct bmac_data *bp = (struct bmac_data *) dev->priv;
1459 /* XXDEBUG(("bmac: enter open\n")); */
1460 /* reset the chip */
1462 bmac_reset_and_enable(dev);
/* irq was disabled by probe/close; re-arm it now that the chip is live */
1463 enable_irq(dev->irq);
1464 dev->flags |= IFF_RUNNING;
/*
 * bmac_close: net_device stop() method.
 * Quiesces the hardware (MAC rx/tx off, all chip interrupts masked,
 * both DBDMA channels stopped), frees every queued rx/tx sk_buff, then
 * masks the irq line and powers the cell down via pmac_call_feature().
 * NOTE(review): loop closing braces, the declaration of `i` and the
 * return statement are elided from this chunk.
 */
1468 static int bmac_close(struct net_device *dev)
1470 struct bmac_data *bp = (struct bmac_data *) dev->priv;
1471 volatile struct dbdma_regs *rd = bp->rx_dma;
1472 volatile struct dbdma_regs *td = bp->tx_dma;
1473 unsigned short config;
1477 dev->flags &= ~(IFF_UP | IFF_RUNNING);
1479 /* disable rx and tx */
/* read-modify-write: clear only the MAC-enable bits, keep other config */
1480 config = bmread(dev, RXCFG);
1481 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
1483 config = bmread(dev, TXCFG);
1484 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
1486 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
1488 /* disable rx and tx dma */
1489 st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
1490 st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
1492 /* free some skb's */
1493 XXDEBUG(("bmac: free rx bufs\n"));
/* release every receive buffer still owned by the ring */
1494 for (i=0; i<N_RX_RING; i++) {
1495 if (bp->rx_bufs[i] != NULL) {
1496 dev_kfree_skb(bp->rx_bufs[i]);
1497 bp->rx_bufs[i] = NULL;
1500 XXDEBUG(("bmac: free tx bufs\n"));
/* release transmit buffers never reclaimed by the tx-done path */
1501 for (i = 0; i<N_TX_RING; i++) {
1502 if (bp->tx_bufs[i] != NULL) {
1503 dev_kfree_skb(bp->tx_bufs[i]);
1504 bp->tx_bufs[i] = NULL;
1507 XXDEBUG(("bmac: all bufs freed\n"));
/* mask the irq and power the BMAC cell down until the next open() */
1510 disable_irq(dev->irq);
1511 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);
/*
 * bmac_start: drain the software transmit queue into the DMA ring.
 * With interrupts disabled, dequeues packets from bp->queue and hands
 * them to bmac_transmit_packet() until the tx ring is full
 * (next fill slot == tx_empty) or the queue is empty.
 * NOTE(review): the return-type line, loop framing and exit paths are
 * elided from this chunk — presumably this body sits inside a loop that
 * stops when skb_dequeue() returns NULL; confirm against full source.
 */
1517 bmac_start(struct net_device *dev)
1519 struct bmac_data *bp = dev->priv;
1521 struct sk_buff *skb;
1522 unsigned long flags;
/* old-style (pre-spinlock) critical section around ring state */
1527 save_flags(flags); cli();
1529 i = bp->tx_fill + 1;
/* ring full when the slot after tx_fill catches up with tx_empty */
1532 if (i == bp->tx_empty)
1534 skb = skb_dequeue(bp->queue);
1537 bmac_transmit_packet(skb, dev);
1539 restore_flags(flags);
/*
 * bmac_output: hard_start_xmit() method.
 * Queues the packet on the driver's software queue; the elided tail
 * presumably kicks bmac_start() to push it into the DMA ring — confirm
 * against the full source (return statement also elided here).
 */
1543 bmac_output(struct sk_buff *skb, struct net_device *dev)
1545 struct bmac_data *bp = dev->priv;
1546 skb_queue_tail(bp->queue, skb);
/*
 * bmac_tx_timeout: transmit watchdog timer handler.
 * Fired (via bp->tx_timeout) when a transmit appears stuck.  Sequence:
 *   1. disable MAC rx/tx and stop the tx DBDMA channel,
 *   2. hard-reset the chip,
 *   3. restart the rx DMA channel at its current command,
 *   4. drop the stuck tx buffer(s), restart tx DMA at the next command,
 *   5. re-enable MAC rx/tx and wake the queue.
 * The register write ordering is hardware-mandated; do not reorder.
 * NOTE(review): the declaration/initialization of `i` and several
 * closing braces are elided from this chunk.
 */
1551 static void bmac_tx_timeout(unsigned long data)
1553 struct net_device *dev = (struct net_device *) data;
1554 struct bmac_data *bp = (struct bmac_data *) dev->priv;
1555 volatile struct dbdma_regs *td = bp->tx_dma;
1556 volatile struct dbdma_regs *rd = bp->rx_dma;
1557 volatile struct dbdma_cmd *cp;
1558 unsigned long flags;
1559 unsigned short config, oldConfig;
1562 XXDEBUG(("bmac: tx_timeout called\n"));
/* old-style critical section; also cancel the pending-timeout flag */
1563 save_flags(flags); cli();
1564 bp->timeout_active = 0;
1566 /* update various counters */
1567 /* bmac_handle_misc_intrs(bp, 0); */
1569 cp = &bp->tx_cmds[bp->tx_empty];
1570 /* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
1571 /* ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
1572 /* mb->pr, mb->xmtfs, mb->fifofc)); */
1574 /* turn off both tx and rx and reset the chip */
1575 config = bmread(dev, RXCFG);
1576 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
1577 config = bmread(dev, TXCFG);
1578 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
1579 out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
1580 printk(KERN_ERR "bmac: transmit timeout - resetting\n");
1581 bmac_enable_and_reset_chip(dev);
1583 /* restart rx dma */
/* resume the rx channel at the command it was processing: stop it,
 * clear the stale completion status, then re-point and re-run it */
1584 cp = bus_to_virt(ld_le32(&rd->cmdptr));
1585 out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
1586 out_le16(&cp->xfer_status, 0);
1587 out_le32(&rd->cmdptr, virt_to_bus(cp));
1588 out_le32(&rd->control, DBDMA_SET(RUN|WAKE));
1590 /* fix up the transmit side */
1591 XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
1592 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
1594 ++bp->stats.tx_errors;
/* NOTE(review): `i` is set from bp->tx_empty in an elided line —
 * confirm against full source.  Drop the stuck buffer(s). */
1595 if (i != bp->tx_fill) {
1596 dev_kfree_skb(bp->tx_bufs[i]);
1597 bp->tx_bufs[i] = NULL;
1598 if (++i >= N_TX_RING) i = 0;
/* ring no longer full: let the stack queue packets again */
1602 netif_wake_queue(dev);
1603 if (i != bp->tx_fill) {
/* pending packets remain: restart tx DMA at the next command */
1604 cp = &bp->tx_cmds[i];
1605 out_le16(&cp->xfer_status, 0);
1606 out_le16(&cp->command, OUTPUT_LAST);
1607 out_le32(&td->cmdptr, virt_to_bus(cp));
1608 out_le32(&td->control, DBDMA_SET(RUN));
1609 /* bmac_set_timeout(dev); */
1610 XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
1613 /* turn it back on */
1614 oldConfig = bmread(dev, RXCFG);
1615 bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
1616 oldConfig = bmread(dev, TXCFG);
1617 bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
1619 restore_flags(flags);
/*
 * dump_dbdma: debug helper — printk the first `count` DBDMA command
 * descriptors starting at `cp` (request, address, branch address and
 * transfer/result fields).
 * NOTE(review): the printk argument list, loop close and the
 * declaration of `i` are elided from this chunk.
 */
1623 static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
1627 for (i=0;i< count;i++) {
1630 printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
/*
 * bmac_proc_info: /proc/net/bmac read handler (old get_info-style
 * interface: fill `buffer`, honour `offset`/`length` windowing via
 * *start and the returned length).
 * Dumps the name and current value of every register in reg_entries[]
 * for the first registered device (bmac_devs).
 * NOTE(review): declarations of len/i/pos/begin, the early-return for
 * the NULL-device case, and the final return are elided from this chunk.
 */
1641 bmac_proc_info(char *buffer, char **start, off_t offset, int length)
1648 if (bmac_devs == NULL)
1651 len += sprintf(buffer, "BMAC counters & registers\n");
1653 for (i = 0; i<N_REG_ENTRIES; i++) {
1654 len += sprintf(buffer + len, "%s: %#08x\n",
1655 reg_entries[i].name,
1656 bmread(bmac_devs, reg_entries[i].reg_offset));
/* stop once we have produced enough to satisfy the requested window */
1664 if (pos > offset+length) break;
/* standard old-procfs windowing: point *start into the buffer and
 * clamp the returned length to what the caller asked for */
1667 *start = buffer + (offset - begin);
1668 len -= (offset - begin);
1670 if (len > length) len = length;
/* Module metadata exposed via modinfo. */
1676 MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
1677 MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
1678 MODULE_LICENSE("GPL");
/*
 * bmac_cleanup: module exit path.
 * Frees the emergency rx buffer, unregisters the PBOOK sleep notifier
 * and the /proc/net/bmac entry, then walks the bmac_devs list tearing
 * each device down: unregister, release OF resources, free all three
 * irqs.  Teardown order mirrors (reverses) the probe path.
 * NOTE(review): the do { ... } loop head, an #endif, and the
 * per-device iounmap/kfree lines appear to be elided from this chunk.
 */
1681 static void __exit bmac_cleanup (void)
1683 struct bmac_data *bp;
1684 struct net_device *dev;
1686 if (bmac_emergency_rxbuf != NULL) {
1687 kfree(bmac_emergency_rxbuf);
/* defensive: don't leave a dangling pointer behind */
1688 bmac_emergency_rxbuf = NULL;
1693 #ifdef CONFIG_PMAC_PBOOK
1694 pmu_unregister_sleep_notifier(&bmac_sleep_notifier);
1696 proc_net_remove("bmac");
/* pop the head of the driver's device list and dismantle it */
1700 bp = (struct bmac_data *) dev->priv;
1701 bmac_devs = bp->next_bmac;
1703 unregister_netdev(dev);
1705 release_OF_resource(bp->node, 0);
1706 release_OF_resource(bp->node, 1);
1707 release_OF_resource(bp->node, 2);
1708 free_irq(dev->irq, dev);
1709 free_irq(bp->tx_dma_intr, dev);
1710 free_irq(bp->rx_dma_intr, dev);
1713 } while (bmac_devs != NULL);
/* Register the probe/cleanup entry points with the module loader. */
1716 module_init(bmac_probe);
1717 module_exit(bmac_cleanup);