Set up environment for compilation
[linux-2.4.21-pre4.git] / drivers / net / gt96100eth.c
1 /*
2  * Copyright 2000, 2001 MontaVista Software Inc.
3  * Author: MontaVista Software, Inc.
4  *              stevel@mvista.com or source@mvista.com
5  *
6  * ########################################################################
7  *
8  *  This program is free software; you can distribute it and/or modify it
9  *  under the terms of the GNU General Public License (Version 2) as
10  *  published by the Free Software Foundation.
11  *
12  *  This program is distributed in the hope it will be useful, but WITHOUT
13  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15  *  for more details.
16  *
17  *  You should have received a copy of the GNU General Public License along
18  *  with this program; if not, write to the Free Software Foundation, Inc.,
19  *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
20  *
21  * ########################################################################
22  *
23  * Ethernet driver for the MIPS GT96100 Advanced Communication Controller.
24  * 
25  *  Revision history
26  *    
27  *    11.11.2001  Moved to 2.4.14, ppopov@mvista.com.  Modified driver to add
28  *                proper gt96100A support.
29  *    12.05.2001  Moved eth port 0 to irq 3 (mapped to GT_SERINT0 on EV96100A)
30  *                in order for both ports to work. Also cleaned up boot
31  *                option support (mac address string parsing), fleshed out
32  *                gt96100_cleanup_module(), and other general code cleanups
33  *                <stevel@mvista.com>.
34  */
35 #include <linux/config.h>
36 #include <linux/module.h>
37 #include <linux/kernel.h>
38 #include <linux/sched.h>
39 #include <linux/string.h>
40 #include <linux/timer.h>
41 #include <linux/errno.h>
42 #include <linux/in.h>
43 #include <linux/ioport.h>
44 #include <linux/slab.h>
45 #include <linux/interrupt.h>
46 #include <linux/pci.h>
47 #include <linux/init.h>
48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h>
50 #include <linux/skbuff.h>
51 #include <linux/delay.h>
52 #include <linux/ctype.h>
53 #include <asm/irq.h>
54 #include <asm/bitops.h>
55 #include <asm/io.h>
56
57 #define DESC_BE 1
58 #define DESC_DATA_BE 1
59
60 #define GT96100_DEBUG 2
61
62 #include "gt96100eth.h"
63
64 // prototypes
65 static void* dmaalloc(size_t size, dma_addr_t *dma_handle);
66 static void dmafree(size_t size, void *vaddr);
67 static void gt96100_delay(int msec);
68 static int gt96100_add_hash_entry(struct net_device *dev,
69                                   unsigned char* addr);
70 static void read_mib_counters(struct gt96100_private *gp);
71 static int read_MII(int phy_addr, u32 reg);
72 static int write_MII(int phy_addr, u32 reg, u16 data);
73 #if 0
74 static void dump_tx_ring(struct net_device *dev);
75 static void dump_rx_ring(struct net_device *dev);
76 #endif
77 static int gt96100_init_module(void);
78 static void gt96100_cleanup_module(void);
79 static void dump_MII(int dbg_lvl, struct net_device *dev);
80 static void dump_tx_desc(int dbg_lvl, struct net_device *dev, int i);
81 static void dump_rx_desc(int dbg_lvl, struct net_device *dev, int i);
82 static void dump_skb(int dbg_lvl, struct net_device *dev,
83                      struct sk_buff *skb);
84 static void dump_hw_addr(int dbg_lvl, struct net_device *dev,
85                          const char* pfx, unsigned char* addr_str);
86 static void update_stats(struct gt96100_private *gp);
87 static void abort(struct net_device *dev, u32 abort_bits);
88 static void hard_stop(struct net_device *dev);
89 static void enable_ether_irq(struct net_device *dev);
90 static void disable_ether_irq(struct net_device *dev);
91 static int gt96100_probe1(int port_num);
92 static void reset_tx(struct net_device *dev);
93 static void reset_rx(struct net_device *dev);
94 static int gt96100_check_tx_consistent(struct gt96100_private *gp);
95 static int gt96100_init(struct net_device *dev);
96 static int gt96100_open(struct net_device *dev);
97 static int gt96100_close(struct net_device *dev);
98 static int gt96100_tx(struct sk_buff *skb, struct net_device *dev);
99 static int gt96100_rx(struct net_device *dev, u32 status);
100 static void gt96100_interrupt(int irq, void *dev_id, struct pt_regs *regs);
101 static void gt96100_tx_timeout(struct net_device *dev);
102 static void gt96100_set_rx_mode(struct net_device *dev);
103 static struct net_device_stats* gt96100_get_stats(struct net_device *dev);
104
105 extern char * __init prom_getcmdline(void);
106
107 static int max_interrupt_work = 32;
108
109 #define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
110
111 #define RUN_AT(x) (jiffies + (x))
112
113 // For reading/writing 32-bit words and half-words from/to DMA memory
114 #ifdef DESC_BE
115 #define cpu_to_dma32 cpu_to_be32
116 #define dma32_to_cpu be32_to_cpu
117 #define cpu_to_dma16 cpu_to_be16
118 #define dma16_to_cpu be16_to_cpu
119 #else
120 #define cpu_to_dma32 cpu_to_le32
121 #define dma32_to_cpu le32_to_cpu
122 #define cpu_to_dma16 cpu_to_le16
123 #define dma16_to_cpu le16_to_cpu
124 #endif
125
/* Default MAC addresses for the two ports, overridable as module
 * parameters.  Format is six dot-separated hex octets (see
 * parse_mac_addr()). */
static char mac0[18] = "00.02.03.04.05.06";
static char mac1[18] = "00.01.02.03.04.05";
MODULE_PARM(mac0, "c18");
MODULE_PARM(mac1, "c18");
MODULE_PARM_DESC(mac0, "MAC address for GT96100 ethernet port 0");
MODULE_PARM_DESC(mac1, "MAC address for GT96100 ethernet port 1");
132
133 /*
134  * Info for the GT96100 ethernet controller's ports.
135  */
136 static struct gt96100_if_t {
137         struct net_device *dev;
138         unsigned int  iobase;   // IO Base address of this port
139         int           irq;      // IRQ number of this port
140         char         *mac_str;
141 } gt96100_iflist[NUM_INTERFACES] = {
142         {
143                 NULL,
144                 GT96100_ETH0_BASE, GT96100_ETHER0_IRQ,
145                 mac0
146         },
147         {
148                 NULL,
149                 GT96100_ETH1_BASE, GT96100_ETHER1_IRQ,
150                 mac1
151         }
152 };
153
154 static inline const char*
155 chip_name(int chip_rev)
156 {
157         switch (chip_rev) {
158         case REV_GT96100:
159                 return "GT96100";
160         case REV_GT96100A_1:
161         case REV_GT96100A:
162                 return "GT96100A";
163         default:
164                 return "Unknown GT96100";
165         }
166 }
167
168 /*
169   DMA memory allocation, derived from pci_alloc_consistent.
170 */
static void *
dmaalloc(size_t size, dma_addr_t *dma_handle)
{
        void *ret;

        /* GFP_ATOMIC: must not sleep; GFP_DMA: stay in the DMA-capable
           zone. */
        ret = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA,
                                       get_order(size));

        if (ret != NULL) {
                /* Invalidate any stale cache lines over the new buffer
                   before handing its physical address to the device. */
                dma_cache_inv((unsigned long)ret, size);
                if (dma_handle != NULL)
                        *dma_handle = virt_to_phys(ret);

                /* bump virtual address up to non-cached area */
                ret = (void*)KSEG1ADDR(ret);
        }

        return ret;
}
190
191 static void
192 dmafree(size_t size, void *vaddr)
193 {
194         vaddr = (void*)KSEG0ADDR(vaddr);
195         free_pages((unsigned long)vaddr, get_order(size));
196 }
197
198
199
200 static void
201 gt96100_delay(int ms)
202 {
203         if (in_interrupt())
204                 return;
205         else {
206                 current->state = TASK_INTERRUPTIBLE;
207                 schedule_timeout(ms*HZ/1000);
208         }
209 }
210
211 static int
212 parse_mac_addr(struct net_device *dev, char* macstr)
213 {
214         int i, j;
215         unsigned char result, value;
216         
217         for (i=0; i<6; i++) {
218                 result = 0;
219                 if (i != 5 && *(macstr+2) != '.') {
220                         err(__FILE__ "invalid mac address format: %d %c\n",
221                             i, *(macstr+2));
222                         return -EINVAL;
223                 }
224                 
225                 for (j=0; j<2; j++) {
226                         if (isxdigit(*macstr) &&
227                             (value = isdigit(*macstr) ? *macstr-'0' : 
228                              toupper(*macstr)-'A'+10) < 16) {
229                                 result = result*16 + value;
230                                 macstr++;
231                         } else {
232                                 err(__FILE__ "invalid mac address "
233                                     "character: %c\n", *macstr);
234                                 return -EINVAL;
235                         }
236                 }
237
238                 macstr++; // step over '.'
239                 dev->dev_addr[i] = result;
240         }
241
242         return 0;
243 }
244
245
/*
 * Read a 16-bit PHY register over the SMI (MDIO) interface.
 *
 * Waits (polling with 1 msec snoozes, up to ~20 msec) for any previous
 * SMI operation to finish, issues the read, then waits for the
 * ReadValid bit.  Returns the register value (>= 0) on success, or
 * -ENODEV if either wait times out.
 */
static int
read_MII(int phy_addr, u32 reg)
{
        int timedout = 20;
        u32 smir = smirOpCode | (phy_addr << smirPhyAdBit) |
                (reg << smirRegAdBit);

        // wait for last operation to complete
        while (GT96100_READ(GT96100_ETH_SMI_REG) & smirBusy) {
                // snooze for 1 msec and check again
                gt96100_delay(1);

                if (--timedout == 0) {
                        printk(KERN_ERR "%s: busy timeout!!\n", __FUNCTION__);
                        return -ENODEV;
                }
        }

        GT96100_WRITE(GT96100_ETH_SMI_REG, smir);

        timedout = 20;
        // wait for read to complete
        while (!((smir = GT96100_READ(GT96100_ETH_SMI_REG)) & smirReadValid)) {
                // snooze for 1 msec and check again
                gt96100_delay(1);

                if (--timedout == 0) {
                        printk(KERN_ERR "%s: timeout!!\n", __FUNCTION__);
                        return -ENODEV;
                }
        }

        return (int)(smir & smirDataMask);
}
280
281 static void
282 dump_tx_desc(int dbg_lvl, struct net_device *dev, int i)
283 {
284         struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
285         gt96100_td_t *td = &gp->tx_ring[i];
286
287         dbg(dbg_lvl, "Tx descriptor at 0x%08lx:\n", virt_to_phys(td));
288         dbg(dbg_lvl,
289             "    cmdstat=%04x, byte_cnt=%04x, buff_ptr=%04x, next=%04x\n",
290             dma32_to_cpu(td->cmdstat),
291             dma16_to_cpu(td->byte_cnt),
292             dma32_to_cpu(td->buff_ptr),
293             dma32_to_cpu(td->next));
294 }
295
296 static void
297 dump_rx_desc(int dbg_lvl, struct net_device *dev, int i)
298 {
299         struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
300         gt96100_rd_t *rd = &gp->rx_ring[i];
301
302         dbg(dbg_lvl, "Rx descriptor at 0x%08lx:\n", virt_to_phys(rd));
303         dbg(dbg_lvl, "    cmdstat=%04x, buff_sz=%04x, byte_cnt=%04x, "
304             "buff_ptr=%04x, next=%04x\n",
305             dma32_to_cpu(rd->cmdstat),
306             dma16_to_cpu(rd->buff_sz),
307             dma16_to_cpu(rd->byte_cnt),
308             dma32_to_cpu(rd->buff_ptr),
309             dma32_to_cpu(rd->next));
310 }
311
/*
 * Write a 16-bit value to a PHY register over the SMI (MDIO)
 * interface.
 *
 * Waits (polling with 1 msec snoozes, up to ~20 msec) for any previous
 * SMI operation to finish, then issues the write.  Returns 0 on
 * success or -1 on busy timeout (note: not a -errno, unlike read_MII).
 */
static int
write_MII(int phy_addr, u32 reg, u16 data)
{
        int timedout = 20;
        u32 smir = (phy_addr << smirPhyAdBit) |
                (reg << smirRegAdBit) | data;

        // wait for last operation to complete
        while (GT96100_READ(GT96100_ETH_SMI_REG) & smirBusy) {
                // snooze for 1 msec and check again
                gt96100_delay(1);

                if (--timedout == 0) {
                        printk(KERN_ERR "%s: busy timeout!!\n", __FUNCTION__);
                        return -1;
                }
        }

        GT96100_WRITE(GT96100_ETH_SMI_REG, smir);
        return 0;
}
333
334 #if 0
335 // These routines work, just disabled to avoid compile warnings
/* Debug helper (currently compiled out): print the Tx ring indices and
 * every Tx descriptor. */
static void
dump_tx_ring(struct net_device *dev)
{
        struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
        int i;

        dbg(0, "%s: txno/txni/cnt=%d/%d/%d\n", __FUNCTION__,
            gp->tx_next_out, gp->tx_next_in, gp->tx_count);

        for (i=0; i<TX_RING_SIZE; i++)
                dump_tx_desc(0, dev, i);
}
348
/* Debug helper (currently compiled out): print the Rx ring index and
 * every Rx descriptor. */
static void
dump_rx_ring(struct net_device *dev)
{
        struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
        int i;

        dbg(0, "%s: rxno=%d\n", __FUNCTION__, gp->rx_next_out);

        for (i=0; i<RX_RING_SIZE; i++)
                dump_rx_desc(0, dev, i);
}
360 #endif
361
362 static void
363 dump_MII(int dbg_lvl, struct net_device *dev)
364 {
365         int i, val;
366         struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
367     
368         if (dbg_lvl <= GT96100_DEBUG) {
369                 for (i=0; i<7; i++) {
370                         if ((val = read_MII(gp->phy_addr, i)) >= 0)
371                                 printk("MII Reg %d=%x\n", i, val);
372                 }
373                 for (i=16; i<21; i++) {
374                         if ((val = read_MII(gp->phy_addr, i)) >= 0)
375                                 printk("MII Reg %d=%x\n", i, val);
376                 }
377         }
378 }
379
380 static void
381 dump_hw_addr(int dbg_lvl, struct net_device *dev, const char* pfx,
382              unsigned char* addr_str)
383 {
384         int i;
385         char buf[100], octet[5];
386     
387         if (dbg_lvl <= GT96100_DEBUG) {
388                 strcpy(buf, pfx);
389                 for (i = 0; i < 6; i++) {
390                         sprintf(octet, "%2.2x%s",
391                                 addr_str[i], i<5 ? ":" : "\n");
392                         strcat(buf, octet);
393                 }
394                 info("%s", buf);
395         }
396 }
397
398
399 static void
400 dump_skb(int dbg_lvl, struct net_device *dev, struct sk_buff *skb)
401 {
402         int i;
403         unsigned char* skbdata;
404     
405         if (dbg_lvl <= GT96100_DEBUG) {
406                 dbg(dbg_lvl, "%s: skb=%p, skb->data=%p, skb->len=%d\n",
407                     __FUNCTION__, skb, skb->data, skb->len);
408
409                 skbdata = (unsigned char*)KSEG1ADDR(skb->data);
410     
411                 for (i=0; i<skb->len; i++) {
412                         if (!(i % 16))
413                                 printk(KERN_DEBUG "\n   %3.3x: %2.2x,",
414                                        i, skbdata[i]);
415                         else
416                                 printk(KERN_DEBUG "%2.2x,", skbdata[i]);
417                 }
418                 printk(KERN_DEBUG "\n");
419         }
420 }
421
422
/*
 * Install a MAC address in the Rx address-filter hash table.
 *
 * The hash-based implementation (disabled with #if 0 below) computes
 * the hardware hash and writes one table entry.  The active fallback
 * instead writes the same two-word entry into every slot of the table,
 * which filters correctly for a single unicast address.
 *
 * Returns 0 on success (the fallback path cannot fail).
 */
static int
gt96100_add_hash_entry(struct net_device *dev, unsigned char* addr)
{
        struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
        //u16 hashResult, stmp;
        //unsigned char ctmp, hash_ea[6];
        u32 tblEntry1, tblEntry0, *tblEntryAddr;
        int i;

        // Pack the 6-byte address into the two-word hash-table entry
        // format; hteValid | hteRD marks the entry valid with receive
        // enabled.
        tblEntry1 = hteValid | hteRD;
        tblEntry1 |= (u32)addr[5] << 3;
        tblEntry1 |= (u32)addr[4] << 11;
        tblEntry1 |= (u32)addr[3] << 19;
        tblEntry1 |= ((u32)addr[2] & 0x1f) << 27;
        dbg(3, "%s: tblEntry1=%x\n", __FUNCTION__, tblEntry1);
        tblEntry0 = ((u32)addr[2] >> 5) & 0x07;
        tblEntry0 |= (u32)addr[1] << 3;
        tblEntry0 |= (u32)addr[0] << 11;
        dbg(3, "%s: tblEntry0=%x\n", __FUNCTION__, tblEntry0);

#if 0

        for (i=0; i<6; i++) {
                // nibble swap
                ctmp = nibswap(addr[i]);
                // invert every nibble
                hash_ea[i] = ((ctmp&1)<<3) | ((ctmp&8)>>3) |
                        ((ctmp&2)<<1) | ((ctmp&4)>>1);
                hash_ea[i] |= ((ctmp&0x10)<<3) | ((ctmp&0x80)>>3) |
                        ((ctmp&0x20)<<1) | ((ctmp&0x40)>>1);
        }

        dump_hw_addr(3, dev, "%s: nib swap/invt addr=", __FUNCTION__, hash_ea);
    
        if (gp->hash_mode == 0) {
                hashResult = ((u16)hash_ea[0] & 0xfc) << 7;
                stmp = ((u16)hash_ea[0] & 0x03) |
                        (((u16)hash_ea[1] & 0x7f) << 2);
                stmp ^= (((u16)hash_ea[1] >> 7) & 0x01) |
                        ((u16)hash_ea[2] << 1);
                stmp ^= (u16)hash_ea[3] | (((u16)hash_ea[4] & 1) << 8);
                hashResult |= stmp;
        } else {
                return -1; // don't support hash mode 1
        }

        dbg(3, "%s: hashResult=%x\n", __FUNCTION__, hashResult);

        tblEntryAddr =
                (u32 *)(&gp->hash_table[((u32)hashResult & 0x7ff) << 3]);
    
        dbg(3, "%s: tblEntryAddr=%p\n", tblEntryAddr, __FUNCTION__);

        for (i=0; i<HASH_HOP_NUMBER; i++) {
                if ((*tblEntryAddr & hteValid) &&
                    !(*tblEntryAddr & hteSkip)) {
                        // This entry is already occupied, go to next entry
                        tblEntryAddr += 2;
                        dbg(3, "%s: skipping to %p\n", __FUNCTION__, 
                            tblEntryAddr);
                } else {
                        memset(tblEntryAddr, 0, 8);
                        tblEntryAddr[1] = cpu_to_dma32(tblEntry1);
                        tblEntryAddr[0] = cpu_to_dma32(tblEntry0);
                        break;
                }
        }

        if (i >= HASH_HOP_NUMBER) {
                err("%s: expired!\n", __FUNCTION__);
                return -1; // Couldn't find an unused entry
        }

#else

        // Fallback: replicate the entry across the whole table
        // (entries are two u32 words each).
        tblEntryAddr = (u32 *)gp->hash_table;
        for (i=0; i<RX_HASH_TABLE_SIZE/4; i+=2) {
                tblEntryAddr[i+1] = cpu_to_dma32(tblEntry1);
                tblEntryAddr[i] = cpu_to_dma32(tblEntry0);
        }

#endif
    
        return 0;
}
508
509
510 static void
511 read_mib_counters(struct gt96100_private *gp)
512 {
513         u32* mib_regs = (u32*)&gp->mib;
514         int i;
515     
516         for (i=0; i<sizeof(mib_counters_t)/sizeof(u32); i++)
517                 mib_regs[i] = GT96100ETH_READ(gp, GT96100_ETH_MIB_COUNT_BASE +
518                                               i*sizeof(u32));
519 }
520
521
522 static void
523 update_stats(struct gt96100_private *gp)
524 {
525         mib_counters_t *mib = &gp->mib;
526         struct net_device_stats *stats = &gp->stats;
527     
528         read_mib_counters(gp);
529     
530         stats->rx_packets = mib->totalFramesReceived;
531         stats->tx_packets = mib->framesSent;
532         stats->rx_bytes = mib->totalByteReceived;
533         stats->tx_bytes = mib->byteSent;
534         stats->rx_errors = mib->totalFramesReceived - mib->framesReceived;
535         //the tx error counters are incremented by the ISR
536         //rx_dropped incremented by gt96100_rx
537         //tx_dropped incremented by gt96100_tx
538         stats->multicast = mib->multicastFramesReceived;
539         // collisions incremented by gt96100_tx_complete
540         stats->rx_length_errors = mib->oversizeFrames + mib->fragments;
541         // The RxError condition means the Rx DMA encountered a
542         // CPU owned descriptor, which, if things are working as
543         // they should, means the Rx ring has overflowed.
544         stats->rx_over_errors = mib->macRxError;
545         stats->rx_crc_errors = mib->cRCError;
546 }
547
/*
 * Abort any in-progress Rx and/or Tx DMA (sdcmrAR / sdcmrAT in
 * abort_bits) and wait for the hardware to acknowledge, polling for up
 * to ~100 msec.  Bits other than the two abort bits are ignored.
 */
static void
abort(struct net_device *dev, u32 abort_bits)
{
        struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
        int timedout = 100; // wait up to 100 msec for hard stop to complete

        dbg(3, "%s\n", __FUNCTION__);

        // Return if neither Rx or Tx abort bits are set
        if (!(abort_bits & (sdcmrAR | sdcmrAT)))
                return;

        // make sure only the Rx/Tx abort bits are set
        abort_bits &= (sdcmrAR | sdcmrAT);
    
        spin_lock(&gp->lock);

        // abort any Rx/Tx DMA immediately
        GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, abort_bits);

        dbg(3, "%s: SDMA comm = %x\n", __FUNCTION__,
            GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM));

        // wait for abort to complete; hardware clears the bits when done
        while (GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM) & abort_bits) {
                // snooze for 1 msec and check again
                gt96100_delay(1);
        
                if (--timedout == 0) {
                        err("%s: timeout!!\n", __FUNCTION__);
                        break;
                }
        }

        spin_unlock(&gp->lock);
}
584
585
586 static void
587 hard_stop(struct net_device *dev)
588 {
589         struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
590
591         dbg(3, "%s\n", __FUNCTION__);
592
593         disable_ether_irq(dev);
594
595         abort(dev, sdcmrAR | sdcmrAT);
596
597         // disable port
598         GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG, 0);
599 }
600
601
602 static void
603 enable_ether_irq(struct net_device *dev)
604 {
605         struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
606         u32 intMask;
607         /*
608          * route ethernet interrupt to GT_SERINT0 for port 0,
609          * GT_INT0 for port 1.
610          */
611         int intr_mask_reg = (gp->port_num == 0) ?
612                 GT96100_SERINT0_MASK : GT96100_INT0_HIGH_MASK;
613         
614         if (gp->chip_rev >= REV_GT96100A_1) {
615                 intMask = icrTxBufferLow | icrTxEndLow |
616                         icrTxErrorLow  | icrRxOVR | icrTxUdr |
617                         icrRxBufferQ0 | icrRxErrorQ0 |
618                         icrMIIPhySTC | icrEtherIntSum;
619         }
620         else {
621                 intMask = icrTxBufferLow | icrTxEndLow |
622                         icrTxErrorLow  | icrRxOVR | icrTxUdr |
623                         icrRxBuffer | icrRxError |
624                         icrMIIPhySTC | icrEtherIntSum;
625         }
626         
627         // unmask interrupts
628         GT96100ETH_WRITE(gp, GT96100_ETH_INT_MASK, intMask);
629     
630         intMask = GT96100_READ(intr_mask_reg);
631         intMask |= 1<<gp->port_num;
632         GT96100_WRITE(intr_mask_reg, intMask);
633 }
634
635 static void
636 disable_ether_irq(struct net_device *dev)
637 {
638         struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
639         u32 intMask;
640         int intr_mask_reg = (gp->port_num == 0) ?
641                 GT96100_SERINT0_MASK : GT96100_INT0_HIGH_MASK;
642
643         intMask = GT96100_READ(intr_mask_reg);
644         intMask &= ~(1<<gp->port_num);
645         GT96100_WRITE(intr_mask_reg, intMask);
646     
647         GT96100ETH_WRITE(gp, GT96100_ETH_INT_MASK, 0);
648 }
649
650
651 /*
652  * Init GT96100 ethernet controller driver
653  */
654 int gt96100_init_module(void)
655 {
656         int i, retval=0;
657         u16 vendor_id, device_id;
658         u32 cpuConfig;
659
660 #ifndef CONFIG_MIPS_GT96100ETH
661         return -ENODEV;
662 #endif
663
664         // probe for GT96100 by reading PCI0 vendor/device ID register
665         pcibios_read_config_word(0, 0, PCI_VENDOR_ID, &vendor_id);
666         pcibios_read_config_word(0, 0, PCI_DEVICE_ID, &device_id);
667     
668         if (vendor_id != PCI_VENDOR_ID_GALILEO ||
669             (device_id != PCI_DEVICE_ID_GALILEO_GT96100 &&
670              device_id != PCI_DEVICE_ID_GALILEO_GT96100A)) {
671                 printk(KERN_ERR __FILE__ ": GT96100 not found!\n");
672                 return -ENODEV;
673         }
674
675         cpuConfig = GT96100_READ(GT96100_CPU_INTERF_CONFIG);
676         if (cpuConfig & (1<<12)) {
677                 printk(KERN_ERR __FILE__
678                        ": must be in Big Endian mode!\n");
679                 return -ENODEV;
680         }
681
682         for (i=0; i < NUM_INTERFACES; i++) {
683                 retval |= gt96100_probe1(i);
684         }
685
686         return retval;
687 }
688
689
690
691 static int __init
692 gt96100_probe1(int port_num)
693 {
694         struct gt96100_private *gp = NULL;
695         struct gt96100_if_t *gtif = &gt96100_iflist[port_num];
696         int phy_addr, phy_id1, phy_id2;
697         u32 phyAD;
698         int retval;
699         unsigned char chip_rev;
700         struct net_device *dev = NULL;
701     
702         if (gtif->irq < 0) {
703                 printk(KERN_ERR "%s: irq unknown - probing not supported\n", __FUNCTION_);
704                 return -ENODEV;
705         }
706     
707         pcibios_read_config_byte(0, 0, PCI_REVISION_ID, &chip_rev);
708
709         if (chip_rev >= REV_GT96100A_1) {
710                 phyAD = GT96100_READ(GT96100_ETH_PHY_ADDR_REG);
711                 phy_addr = (phyAD >> (5*port_num)) & 0x1f;
712         } else {
713                 /*
714                  * not sure what's this about -- probably 
715                  * a gt bug
716                  */
717                 phy_addr = port_num;
718                 phyAD = GT96100_READ(GT96100_ETH_PHY_ADDR_REG);
719                 phyAD &= ~(0x1f << (port_num*5));
720                 phyAD |= phy_addr << (port_num*5);
721                 GT96100_WRITE(GT96100_ETH_PHY_ADDR_REG, phyAD);
722         }
723         
724         // probe for the external PHY
725         if ((phy_id1 = read_MII(phy_addr, 2)) <= 0 ||
726             (phy_id2 = read_MII(phy_addr, 3)) <= 0) {
727                 printk(KERN_ERR "%s: no PHY found on MII%d\n", __FUNCTION__, port_num);
728                 return -ENODEV;
729         }
730         
731         if (!request_region(gtif->iobase, GT96100_ETH_IO_SIZE, "GT96100ETH")) {
732                 printk(KERN_ERR "%s: request_region failed\n", __FUNCTION__);
733                 return -EBUSY;
734         }
735
736         dev = init_etherdev(0, sizeof(struct gt96100_private));
737         gtif->dev = dev;
738         
739         /* private struct aligned and zeroed by init_etherdev */
740         /* Fill in the 'dev' fields. */
741         dev->base_addr = gtif->iobase;
742         dev->irq = gtif->irq;
743
744         if ((retval = parse_mac_addr(dev, gtif->mac_str))) {
745                 err("%s: MAC address parse failed\n", __FUNCTION__);
746                 retval = -EINVAL;
747                 goto free_region;
748         }
749
750         /* Initialize our private structure. */
751         if (dev->priv == NULL) {
752
753                 gp = (struct gt96100_private *)kmalloc(sizeof(*gp),
754                                                        GFP_KERNEL);
755                 if (gp == NULL) {
756                         retval = -ENOMEM;
757                         goto free_region;
758                 }
759         
760                 dev->priv = gp;
761         }
762
763         gp = dev->priv;
764
765         memset(gp, 0, sizeof(*gp)); // clear it
766
767         gp->port_num = port_num;
768         gp->io_size = GT96100_ETH_IO_SIZE;
769         gp->port_offset = port_num * GT96100_ETH_IO_SIZE;
770         gp->phy_addr = phy_addr;
771         gp->chip_rev = chip_rev;
772
773         info("%s found at 0x%x, irq %d\n",
774              chip_name(gp->chip_rev), gtif->iobase, gtif->irq);
775         dump_hw_addr(0, dev, "HW Address ", dev->dev_addr);
776         info("%s chip revision=%d\n", chip_name(gp->chip_rev), gp->chip_rev);
777         info("%s ethernet port %d\n", chip_name(gp->chip_rev), gp->port_num);
778         info("external PHY ID1=0x%04x, ID2=0x%04x\n", phy_id1, phy_id2);
779
780         // Allocate Rx and Tx descriptor rings
781         if (gp->rx_ring == NULL) {
782                 // All descriptors in ring must be 16-byte aligned
783                 gp->rx_ring = dmaalloc(sizeof(gt96100_rd_t) * RX_RING_SIZE
784                                        + sizeof(gt96100_td_t) * TX_RING_SIZE,
785                                        &gp->rx_ring_dma);
786                 if (gp->rx_ring == NULL) {
787                         retval = -ENOMEM;
788                         goto free_region;
789                 }
790         
791                 gp->tx_ring = (gt96100_td_t *)(gp->rx_ring + RX_RING_SIZE);
792                 gp->tx_ring_dma =
793                         gp->rx_ring_dma + sizeof(gt96100_rd_t) * RX_RING_SIZE;
794         }
795     
796         // Allocate the Rx Data Buffers
797         if (gp->rx_buff == NULL) {
798                 gp->rx_buff = dmaalloc(PKT_BUF_SZ*RX_RING_SIZE,
799                                        &gp->rx_buff_dma);
800                 if (gp->rx_buff == NULL) {
801                         dmafree(sizeof(gt96100_rd_t) * RX_RING_SIZE
802                                 + sizeof(gt96100_td_t) * TX_RING_SIZE,
803                                 gp->rx_ring);
804                         retval = -ENOMEM;
805                         goto free_region;
806                 }
807         }
808     
809         dbg(3, "%s: rx_ring=%p, tx_ring=%p\n", __FUNCTION__,
810             gp->rx_ring, gp->tx_ring);
811
812         // Allocate Rx Hash Table
813         if (gp->hash_table == NULL) {
814                 gp->hash_table = (char*)dmaalloc(RX_HASH_TABLE_SIZE,
815                                                  &gp->hash_table_dma);
816                 if (gp->hash_table == NULL) {
817                         dmafree(sizeof(gt96100_rd_t) * RX_RING_SIZE
818                                 + sizeof(gt96100_td_t) * TX_RING_SIZE,
819                                 gp->rx_ring);
820                         dmafree(PKT_BUF_SZ*RX_RING_SIZE, gp->rx_buff);
821                         retval = -ENOMEM;
822                         goto free_region;
823                 }
824         }
825     
826         dbg(3, "%s: hash=%p\n", __FUNCTION__, gp->hash_table);
827
828         spin_lock_init(&gp->lock);
829     
830         dev->open = gt96100_open;
831         dev->hard_start_xmit = gt96100_tx;
832         dev->stop = gt96100_close;
833         dev->get_stats = gt96100_get_stats;
834         //dev->do_ioctl = gt96100_ioctl;
835         dev->set_multicast_list = gt96100_set_rx_mode;
836         dev->tx_timeout = gt96100_tx_timeout;
837         dev->watchdog_timeo = GT96100ETH_TX_TIMEOUT;
838
839         /* Fill in the fields of the device structure with ethernet values. */
840         ether_setup(dev);
841         return 0;
842
843  free_region:
844         release_region(gtif->iobase, GT96100_ETH_IO_SIZE);
845         unregister_netdev(dev);
846         if (dev->priv != NULL)
847                 kfree (dev->priv);
848         kfree (dev);
849         err("%s failed.  Returns %d\n", __FUNCTION__, retval);
850         return retval;
851 }
852
853
/*
 * Abort Tx DMA and re-initialize the Tx descriptor ring: free any
 * queued skbs, give every descriptor back to the CPU, chain them into
 * a circular list, point the hardware's current-descriptor register at
 * the start, and zero the software ring indices.
 */
static void
reset_tx(struct net_device *dev)
{
        struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
        int i;

        abort(dev, sdcmrAT);

        for (i=0; i<TX_RING_SIZE; i++) {
                // free any skb still queued on this descriptor, using
                // the irq-safe variant when in interrupt context
                if (gp->tx_skbuff[i]) {
                        if (in_interrupt())
                                dev_kfree_skb_irq(gp->tx_skbuff[i]);
                        else
                                dev_kfree_skb(gp->tx_skbuff[i]);
                        gp->tx_skbuff[i] = NULL;
                }

                gp->tx_ring[i].cmdstat = 0; // CPU owns
                gp->tx_ring[i].byte_cnt = 0;
                gp->tx_ring[i].buff_ptr = 0;
                // link to the next descriptor (DMA byte order)
                gp->tx_ring[i].next =
                        cpu_to_dma32(gp->tx_ring_dma +
                                     sizeof(gt96100_td_t) * (i+1));
                dump_tx_desc(4, dev, i);
        }
        /* Wrap the ring. */
        gp->tx_ring[i-1].next = cpu_to_dma32(gp->tx_ring_dma);
    
        // setup only the lowest priority TxCDP reg
        GT96100ETH_WRITE(gp, GT96100_ETH_CURR_TX_DESC_PTR0, gp->tx_ring_dma);
        GT96100ETH_WRITE(gp, GT96100_ETH_CURR_TX_DESC_PTR1, 0);

        // init Tx indeces and pkt counter
        gp->tx_next_in = gp->tx_next_out = 0;
        gp->tx_count = 0;

}
891
/*
 * Reinitialize the Rx descriptor ring: abort any Rx DMA in progress,
 * re-chain every descriptor to its static receive buffer, hand all
 * descriptors to the device, and reset the chip's first/current Rx
 * descriptor pointers (only queue 0 is used; queues 1-3 are zeroed).
 */
static void
reset_rx(struct net_device *dev)
{
	struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
	int i;

	// Abort the receive DMA before touching the descriptors.
	abort(dev, sdcmrAR);

	for (i=0; i<RX_RING_SIZE; i++) {
		// Chain to the next descriptor and attach the i-th slot of
		// the contiguous Rx buffer area (DMA/bus addresses).
		gp->rx_ring[i].next =
			cpu_to_dma32(gp->rx_ring_dma +
				     sizeof(gt96100_rd_t) * (i+1));
		gp->rx_ring[i].buff_ptr =
			cpu_to_dma32(gp->rx_buff_dma + i*PKT_BUF_SZ);
		gp->rx_ring[i].buff_sz = cpu_to_dma16(PKT_BUF_SZ);
		// Give ownership to device, set first and last, enable intr
		gp->rx_ring[i].cmdstat =
			cpu_to_dma32((u32)(rxFirst | rxLast | rxOwn | rxEI));
		dump_rx_desc(4, dev, i);
	}
	/* Wrap the ring. */
	gp->rx_ring[i-1].next = cpu_to_dma32(gp->rx_ring_dma);

	// Setup only the lowest priority RxFDP and RxCDP regs
	for (i=0; i<4; i++) {
		if (i == 0) {
			GT96100ETH_WRITE(gp, GT96100_ETH_1ST_RX_DESC_PTR0,
					 gp->rx_ring_dma);
			GT96100ETH_WRITE(gp, GT96100_ETH_CURR_RX_DESC_PTR0,
					 gp->rx_ring_dma);
		} else {
			GT96100ETH_WRITE(gp,
					 GT96100_ETH_1ST_RX_DESC_PTR0 + i*4,
					 0);
			GT96100ETH_WRITE(gp,
					 GT96100_ETH_CURR_RX_DESC_PTR0 + i*4,
					 0);
		}
	}

	// init Rx NextOut index
	gp->rx_next_out = 0;
}
935
936
937 // Returns 1 if the Tx counter and indeces don't gel
938 static int
939 gt96100_check_tx_consistent(struct gt96100_private *gp)
940 {
941         int diff = gp->tx_next_in - gp->tx_next_out;
942
943         diff = diff<0 ? TX_RING_SIZE + diff : diff;
944         diff = gp->tx_count == TX_RING_SIZE ? diff + TX_RING_SIZE : diff;
945     
946         return (diff != gp->tx_count);
947 }
948
/*
 * Bring the ethernet port up from scratch: stop the port, program the
 * CIU arbiter / routing / GPP pin muxing for MII, load the Rx hash
 * table with our own MAC address, reset both descriptor rings,
 * configure and start the SDMA engine, clear the MIB counters, and
 * finally start the queue and enable interrupts.  Returns 0 (no
 * failure paths at present).
 */
static int
gt96100_init(struct net_device *dev)
{
	struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
	u32 tmp;
	u16 mii_reg;

	dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);
	dbg(3, "%s: scs10_lo=%4x, scs10_hi=%4x\n", __FUNCTION__,
	    GT96100_READ(0x8), GT96100_READ(0x10));
	dbg(3, "%s: scs32_lo=%4x, scs32_hi=%4x\n", __FUNCTION__,
	    GT96100_READ(0x18), GT96100_READ(0x20));

	// Stop and disable Port
	hard_stop(dev);

	// Setup CIU Arbiter
	tmp = GT96100_READ(GT96100_CIU_ARBITER_CONFIG);
	tmp |= (0x0c << (gp->port_num*2)); // set Ether DMA req priority to hi
#ifndef DESC_BE
	tmp &= ~(1<<31);                   // set desc endianess to little
#else
	tmp |= (1<<31);
#endif
	GT96100_WRITE(GT96100_CIU_ARBITER_CONFIG, tmp);
	dbg(3, "%s: CIU Config=%x/%x\n", __FUNCTION__,
	    tmp, GT96100_READ(GT96100_CIU_ARBITER_CONFIG));

	// Set routing.
	tmp = GT96100_READ(GT96100_ROUTE_MAIN) & (0x3f << 18);
	tmp |= (0x07 << (18 + gp->port_num*3));
	GT96100_WRITE(GT96100_ROUTE_MAIN, tmp);

	/* set MII as peripheral func */
	tmp = GT96100_READ(GT96100_GPP_CONFIG2);
	tmp |= 0x7fff << (gp->port_num*16);
	GT96100_WRITE(GT96100_GPP_CONFIG2, tmp);

	/* Set up MII port pin directions */
	tmp = GT96100_READ(GT96100_GPP_IO2);
	tmp |= 0x003d << (gp->port_num*16);
	GT96100_WRITE(GT96100_GPP_IO2, tmp);

	// Set-up hash table
	memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE); // clear it
	gp->hash_mode = 0;
	// Add a single entry to hash table - our ethernet address
	gt96100_add_hash_entry(dev, dev->dev_addr);
	// Set-up DMA ptr to hash table
	GT96100ETH_WRITE(gp, GT96100_ETH_HASH_TBL_PTR, gp->hash_table_dma);
	dbg(3, "%s: Hash Tbl Ptr=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_HASH_TBL_PTR));

	// Setup Tx
	reset_tx(dev);

	dbg(3, "%s: Curr Tx Desc Ptr0=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_CURR_TX_DESC_PTR0));

	// Setup Rx
	reset_rx(dev);

	dbg(3, "%s: 1st/Curr Rx Desc Ptr0=%x/%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_1ST_RX_DESC_PTR0),
	    GT96100ETH_READ(gp, GT96100_ETH_CURR_RX_DESC_PTR0));

	// eth port config register
	GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
			 pcxrFCTL | pcxrFCTLen | pcxrFLP | pcxrDPLXen);

	// Enable interrupt generation in the PHY (bit 1 of reg 0x11)
	mii_reg = read_MII(gp->phy_addr, 0x11); /* int enable register */
	mii_reg |= 2;  /* enable mii interrupt */
	write_MII(gp->phy_addr, 0x11, mii_reg);

	dbg(3, "%s: PhyAD=%x\n", __FUNCTION__,
	    GT96100_READ(GT96100_ETH_PHY_ADDR_REG));

	// setup DMA

	// We want the Rx/Tx DMA to write/read data to/from memory in
	// Big Endian mode. Also set DMA Burst Size to 8 64Bit words.
#ifdef DESC_DATA_BE
	GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_CONFIG,
			 (0xf<<sdcrRCBit) | sdcrRIFB | (3<<sdcrBSZBit));
#else
	GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_CONFIG,
			 sdcrBLMR | sdcrBLMT |
			 (0xf<<sdcrRCBit) | sdcrRIFB | (3<<sdcrBSZBit));
#endif
	dbg(3, "%s: SDMA Config=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_SDMA_CONFIG));

	// start Rx DMA
	GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, sdcmrERD);
	dbg(3, "%s: SDMA Comm=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM));

	// enable this port (set hash size to 1/2K)
	GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG, pcrEN | pcrHS);
	dbg(3, "%s: Port Config=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_PORT_CONFIG));

	/*
	 * Disable all Type-of-Service queueing. All Rx packets will be
	 * treated normally and will be sent to the lowest priority
	 * queue.
	 *
	 * Disable flow-control for now. FIXME: support flow control?
	 */

	// clear all the MIB ctr regs
	GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
			 pcxrFCTL | pcxrFCTLen | pcxrFLP |
			 pcxrPRIOrxOverride);
	read_mib_counters(gp);
	GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
			 pcxrFCTL | pcxrFCTLen | pcxrFLP |
			 pcxrPRIOrxOverride | pcxrMIBclrMode);

	dbg(3, "%s: Port Config Ext=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_PORT_CONFIG_EXT));

	netif_start_queue(dev);

	dump_MII(4, dev);

	// enable interrupts
	enable_ether_irq(dev);

	// we should now be receiving frames
	return 0;
}
1081
1082
1083 static int
1084 gt96100_open(struct net_device *dev)
1085 {
1086         int retval;
1087     
1088         MOD_INC_USE_COUNT;
1089
1090         dbg(2, "%s: dev=%p\n", __FUNCTION__, dev);
1091
1092         // Initialize and startup the GT-96100 ethernet port
1093         if ((retval = gt96100_init(dev))) {
1094                 err("error in gt96100_init\n");
1095                 free_irq(dev->irq, dev);
1096                 MOD_DEC_USE_COUNT;
1097                 return retval;
1098         }
1099
1100         if ((retval = request_irq(dev->irq, &gt96100_interrupt,
1101                                   SA_SHIRQ, dev->name, dev))) {
1102                 err("unable to get IRQ %d\n", dev->irq);
1103                 MOD_DEC_USE_COUNT;
1104                 return retval;
1105         }
1106         
1107         dbg(2, "%s: Initialization done.\n", __FUNCTION__);
1108
1109         return 0;
1110 }
1111
1112 static int
1113 gt96100_close(struct net_device *dev)
1114 {
1115         dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);
1116
1117         // stop the device
1118         if (netif_device_present(dev)) {
1119                 netif_stop_queue(dev);
1120                 hard_stop(dev);
1121         }
1122
1123         free_irq(dev->irq, dev);
1124     
1125         MOD_DEC_USE_COUNT;
1126         return 0;
1127 }
1128
1129
/*
 * hard_start_xmit hook: place one skb on the Tx descriptor ring and
 * restart the Tx DMA if it has stopped.  Returns 0 on success, 1 when
 * the packet cannot be accepted (ring full, link down, or descriptor
 * still owned by the device).
 *
 * NOTE(review): the failure paths bump stats.tx_dropped but still
 * return 1, which under 2.4 semantics asks the stack to requeue the
 * skb rather than drop it -- confirm the intended accounting.
 */
static int
gt96100_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
	unsigned long flags;
	int nextIn;

	spin_lock_irqsave(&gp->lock, flags);

	nextIn = gp->tx_next_in;

	dbg(3, "%s: nextIn=%d\n", __FUNCTION__, nextIn);

	if (gp->tx_count >= TX_RING_SIZE) {
		warn("Tx Ring full, pkt dropped.\n");
		gp->stats.tx_dropped++;
		spin_unlock_irqrestore(&gp->lock, flags);
		return 1;
	}

	if (!(gp->last_psr & psrLink)) {
		err("%s: Link down, pkt dropped.\n", __FUNCTION__);
		gp->stats.tx_dropped++;
		spin_unlock_irqrestore(&gp->lock, flags);
		return 1;
	}

	if (dma32_to_cpu(gp->tx_ring[nextIn].cmdstat) & txOwn) {
		err("%s: device owns descriptor, pkt dropped.\n", __FUNCTION__);
		gp->stats.tx_dropped++;
		// stop the queue, so Tx timeout can fix it
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&gp->lock, flags);
		return 1;
	}

	// Prepare the Descriptor at tx_next_in
	gp->tx_skbuff[nextIn] = skb;
	gp->tx_ring[nextIn].byte_cnt = cpu_to_dma16(skb->len);
	gp->tx_ring[nextIn].buff_ptr = cpu_to_dma32(virt_to_phys(skb->data));
	// make sure packet gets written back to memory
	dma_cache_wback_inv((unsigned long)(skb->data), skb->len);
	// Give ownership to device, set first and last desc, enable interrupt
	// Setting of ownership bit must be *last*!
	gp->tx_ring[nextIn].cmdstat =
		cpu_to_dma32((u32)(txOwn | txGenCRC | txEI |
				   txPad | txFirst | txLast));

	dump_tx_desc(4, dev, nextIn);
	dump_skb(4, dev, skb);

	// increment tx_next_in with wrap
	gp->tx_next_in = (nextIn + 1) % TX_RING_SIZE;
	// If DMA is stopped, restart
	if (!(GT96100ETH_READ(gp, GT96100_ETH_PORT_STATUS) & psrTxLow))
		GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM,
				 sdcmrERD | sdcmrTXDL);

	// increment count and stop queue if full
	if (++gp->tx_count == TX_RING_SIZE) {
		gp->tx_full = 1;
		netif_stop_queue(dev);
		dbg(2, "Tx Ring now full, queue stopped.\n");
	}

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&gp->lock, flags);

	return 0;
}
1200
1201
/*
 * Service the Rx ring: walk descriptors from rx_next_out up to the
 * chip's first-Rx-descriptor pointer, copy each good single-descriptor
 * frame into a freshly allocated skb and pass it to the stack, then
 * hand the descriptor back to the device.  Runs from the interrupt
 * handler with gp->lock held; the walk is bounded by the shared
 * gp->intr_work_done budget.  Always returns 0.
 */
static int
gt96100_rx(struct net_device *dev, u32 status)
{
	struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
	struct sk_buff *skb;
	int pkt_len, nextOut, cdp;
	gt96100_rd_t *rd;
	u32 cmdstat;

	dbg(3, "%s: dev=%p, status=%x\n", __FUNCTION__, dev, status);

	// Translate the chip's descriptor pointer into a ring index.
	cdp = (GT96100ETH_READ(gp, GT96100_ETH_1ST_RX_DESC_PTR0)
	       - gp->rx_ring_dma) / sizeof(gt96100_rd_t);

	// Continue until we reach 1st descriptor pointer
	for (nextOut = gp->rx_next_out; nextOut != cdp;
	     nextOut = (nextOut + 1) % RX_RING_SIZE) {

		if (--gp->intr_work_done == 0)
			break;

		rd = &gp->rx_ring[nextOut];
		cmdstat = dma32_to_cpu(rd->cmdstat);

		dbg(4, "%s: Rx desc cmdstat=%x, nextOut=%d\n", __FUNCTION__,
		    cmdstat, nextOut);

		if (cmdstat & (u32)rxOwn) {
			//err(__FUNCTION__ ": device owns descriptor!\n");
			// DMA is not finished updating descriptor???
			// Leave and come back later to pick-up where
			// we left off.
			break;
		}

		// Drop this received pkt if there were any errors
		if (((cmdstat & (u32)(rxErrorSummary)) &&
		     (cmdstat & (u32)(rxFirst))) || (status & icrRxError)) {
			// update the detailed rx error counters that
			// are not covered by the MIB counters.
			if (cmdstat & (u32)rxOverrun)
				gp->stats.rx_fifo_errors++;
			// give the descriptor back to the device
			cmdstat |= (u32)rxOwn;
			rd->cmdstat = cpu_to_dma32(cmdstat);
			continue;
		}

		/*
		 * Must be first and last (ie only) descriptor of packet. We
		 * ignore (drop) any packets that do not fit in one descriptor.
		 * Every descriptor's receive buffer is large enough to hold
		 * the maximum 802.3 frame size, so a multi-descriptor packet
		 * indicates an error. Most if not all corrupted packets will
		 * have already been dropped by the above check for the
		 * rxErrorSummary status bit.
		 */
		if (!(cmdstat & (u32)rxFirst) || !(cmdstat & (u32)rxLast)) {
			if (cmdstat & (u32)rxFirst) {
				/*
				 * This is the first descriptor of a
				 * multi-descriptor packet. It isn't corrupted
				 * because the above check for rxErrorSummary
				 * would have dropped it already, so what's
				 * the deal with this packet? Good question,
				 * let's dump it out.
				 */
				err("%s: desc not first and last!\n", __FUNCTION__);
				dump_rx_desc(0, dev, nextOut);
			}
			cmdstat |= (u32)rxOwn;
			rd->cmdstat = cpu_to_dma32(cmdstat);
			// continue to drop every descriptor of this packet
			continue;
		}

		// NOTE(review): byte_cnt is used as-is; it is unclear from
		// here whether the chip includes the 4-byte CRC in this
		// count -- confirm against the GT-96100 datasheet.
		pkt_len = dma16_to_cpu(rd->byte_cnt);

		/* Create new skb. */
		skb = dev_alloc_skb(pkt_len+2);
		if (skb == NULL) {
			err("%s: Memory squeeze, dropping packet.\n", __FUNCTION__);
			gp->stats.rx_dropped++;
			cmdstat |= (u32)rxOwn;
			rd->cmdstat = cpu_to_dma32(cmdstat);
			continue;
		}
		skb->dev = dev;
		skb_reserve(skb, 2);   /* 16 byte IP header align */
		// Copy out of the static DMA buffer for this ring slot.
		memcpy(skb_put(skb, pkt_len),
		       &gp->rx_buff[nextOut*PKT_BUF_SZ], pkt_len);
		skb->protocol = eth_type_trans(skb, dev);
		dump_skb(4, dev, skb);

		netif_rx(skb);        /* pass the packet to upper layers */
		dev->last_rx = jiffies;

		// now we can release ownership of this desc back to device
		cmdstat |= (u32)rxOwn;
		rd->cmdstat = cpu_to_dma32(cmdstat);
	}

	if (nextOut == gp->rx_next_out)
		dbg(3, "%s: RxCDP did not increment?\n", __FUNCTION__);

	gp->rx_next_out = nextOut;
	return 0;
}
1309
1310
1311 static void
1312 gt96100_tx_complete(struct net_device *dev, u32 status)
1313 {
1314         struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
1315         int nextOut, cdp;
1316         gt96100_td_t *td;
1317         u32 cmdstat;
1318
1319         cdp = (GT96100ETH_READ(gp, GT96100_ETH_CURR_TX_DESC_PTR0)
1320                - gp->tx_ring_dma) / sizeof(gt96100_td_t);
1321     
1322         // Continue until we reach the current descriptor pointer
1323         for (nextOut = gp->tx_next_out; nextOut != cdp;
1324              nextOut = (nextOut + 1) % TX_RING_SIZE) {
1325         
1326                 if (--gp->intr_work_done == 0)
1327                         break;
1328
1329                 td = &gp->tx_ring[nextOut];
1330                 cmdstat = dma32_to_cpu(td->cmdstat);
1331         
1332                 dbg(3, "%s: Tx desc cmdstat=%x, nextOut=%d\n", __FUNCTION__,
1333                     cmdstat, nextOut);
1334         
1335                 if (cmdstat & (u32)txOwn) {
1336                         //dump_tx_ring(dev);
1337                         // DMA is not finished writing descriptor???
1338                         // Leave and come back later to pick-up where
1339                         // we left off.
1340                         break;
1341                 }
1342         
1343                 // increment Tx error stats
1344                 if (cmdstat & (u32)txErrorSummary) {
1345                         dbg(2, "%s: Tx error, cmdstat = %x\n", __FUNCTION__,
1346                             cmdstat);
1347                         gp->stats.tx_errors++;
1348                         if (cmdstat & (u32)txReTxLimit)
1349                                 gp->stats.tx_aborted_errors++;
1350                         if (cmdstat & (u32)txUnderrun)
1351                                 gp->stats.tx_fifo_errors++;
1352                         if (cmdstat & (u32)txLateCollision)
1353                                 gp->stats.tx_window_errors++;
1354                 }
1355         
1356                 if (cmdstat & (u32)txCollision)
1357                         gp->stats.collisions +=
1358                                 (u32)((cmdstat & txReTxCntMask) >>
1359                                       txReTxCntBit);
1360
1361                 // Wake the queue if the ring was full
1362                 if (gp->tx_full) {
1363                         gp->tx_full = 0;
1364                         if (gp->last_psr & psrLink) {
1365                                 netif_wake_queue(dev);
1366                                 dbg(2, "%s: Tx Ring was full, queue waked\n", __FUNCTION_);
1367                         }
1368                 }
1369         
1370                 // decrement tx ring buffer count
1371                 if (gp->tx_count) gp->tx_count--;
1372         
1373                 // free the skb
1374                 if (gp->tx_skbuff[nextOut]) {
1375                         dbg(3, "%s: good Tx, skb=%p\n", __FUNCTION__,
1376                             gp->tx_skbuff[nextOut]);
1377                         dev_kfree_skb_irq(gp->tx_skbuff[nextOut]);
1378                         gp->tx_skbuff[nextOut] = NULL;
1379                 } else {
1380                         err("%s: no skb!\n", __FUNCTION__);
1381                 }
1382         }
1383
1384         gp->tx_next_out = nextOut;
1385
1386         if (gt96100_check_tx_consistent(gp)) {
1387                 err("%s: Tx queue inconsistent!\n", __FUNCTION__);
1388         }
1389     
1390         if ((status & icrTxEndLow) && gp->tx_count != 0) {
1391                 // we must restart the DMA
1392                 dbg(3, "%s: Restarting Tx DMA\n", __FUNCTION__);
1393                 GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM,
1394                                  sdcmrERD | sdcmrTXDL);
1395         }
1396 }
1397
1398
1399 static void
1400 gt96100_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1401 {
1402         struct net_device *dev = (struct net_device *)dev_id;
1403         struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
1404         u32 status;
1405     
1406         if (dev == NULL) {
1407                 err("%s: null dev ptr\n", __FUNCTION__);
1408                 return;
1409         }
1410
1411         dbg(3, "%s: entry, icr=%x\n", __FUNCTION__,
1412             GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE));
1413
1414         spin_lock(&gp->lock);
1415
1416         gp->intr_work_done = max_interrupt_work;
1417
1418         while (gp->intr_work_done > 0) {
1419
1420                 status = GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE);
1421                 // ACK interrupts
1422                 GT96100ETH_WRITE(gp, GT96100_ETH_INT_CAUSE, ~status);
1423
1424                 if ((status & icrEtherIntSum) == 0 &&
1425                     !(status & (icrTxBufferLow|icrTxBufferHigh|icrRxBuffer)))
1426                         break;
1427         
1428                 if (status & icrMIIPhySTC) {
1429                         u32 psr = GT96100ETH_READ(gp, GT96100_ETH_PORT_STATUS);
1430                         if (gp->last_psr != psr) {
1431                                 dbg(0, "port status:\n");
1432                                 dbg(0, "    %s MBit/s, %s-duplex, "
1433                                     "flow-control %s, link is %s,\n",
1434                                     psr & psrSpeed ? "100":"10",
1435                                     psr & psrDuplex ? "full":"half",
1436                                     psr & psrFctl ? "disabled":"enabled",
1437                                     psr & psrLink ? "up":"down");
1438                                 dbg(0, "    TxLowQ is %s, TxHighQ is %s, "
1439                                     "Transmitter is %s\n",
1440                                     psr & psrTxLow ? "running":"stopped",
1441                                     psr & psrTxHigh ? "running":"stopped",
1442                                     psr & psrTxInProg ? "on":"off");
1443                 
1444                                 if ((psr & psrLink) && !gp->tx_full &&
1445                                     netif_queue_stopped(dev)) {
1446                                         dbg(0, ": Link up, waking queue.\n",
1447                                             __FUNCTION_);
1448                                         netif_wake_queue(dev);
1449                                 } else if (!(psr & psrLink) &&
1450                                            !netif_queue_stopped(dev)) {
1451                                         dbg(0, "Link down, stopping queue.\n",
1452                                             __FUNCTION__);
1453                                         netif_stop_queue(dev);
1454                                 }
1455
1456                                 gp->last_psr = psr;
1457                         }
1458
1459                         if (--gp->intr_work_done == 0)
1460                                 break;
1461                 }
1462         
1463                 if (status & (icrTxBufferLow | icrTxEndLow))
1464                         gt96100_tx_complete(dev, status);
1465
1466                 if (status & (icrRxBuffer | icrRxError)) {
1467                         gt96100_rx(dev, status);
1468                 }
1469         
1470                 // Now check TX errors (RX errors were handled in gt96100_rx)
1471                 if (status & icrTxErrorLow) {
1472                         err("%s: Tx resource error\n", __FUNCTION__);
1473                         if (--gp->intr_work_done == 0)
1474                                 break;
1475                 }
1476         
1477                 if (status & icrTxUdr) {
1478                         err("%s: Tx underrun error\n", __FUNCTION__);
1479                         if (--gp->intr_work_done == 0)
1480                                 break;
1481                 }
1482         }
1483
1484         if (gp->intr_work_done == 0) {
1485                 // ACK any remaining pending interrupts
1486                 GT96100ETH_WRITE(gp, GT96100_ETH_INT_CAUSE, 0);
1487                 dbg(3, "%s: hit max work\n", __FUNCTION__);
1488         }
1489     
1490         dbg(3, "%s: exit, icr=%x\n", __FUNCTION__,
1491             GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE));
1492
1493         spin_unlock(&gp->lock);
1494 }
1495
1496
1497 static void
1498 gt96100_tx_timeout(struct net_device *dev)
1499 {
1500         struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
1501         unsigned long flags;
1502     
1503         spin_lock_irqsave(&gp->lock, flags);
1504     
1505         if (!(gp->last_psr & psrLink)) {
1506                 err("tx_timeout: link down.\n");
1507                 spin_unlock_irqrestore(&gp->lock, flags);
1508         } else {
1509                 if (gt96100_check_tx_consistent(gp))
1510                         err("tx_timeout: Tx ring error.\n");
1511
1512                 disable_ether_irq(dev);
1513                 spin_unlock_irqrestore(&gp->lock, flags);
1514                 reset_tx(dev);
1515                 enable_ether_irq(dev);
1516         
1517                 netif_wake_queue(dev);
1518         }
1519 }
1520
1521
1522 static void
1523 gt96100_set_rx_mode(struct net_device *dev)
1524 {
1525         struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
1526         unsigned long flags;
1527         //struct dev_mc_list *mcptr;
1528     
1529         dbg(3, "%s: dev=%p, flags=%x\n", __FUNCTION__, dev, dev->flags);
1530
1531         // stop the Receiver DMA
1532         abort(dev, sdcmrAR);
1533
1534         spin_lock_irqsave(&gp->lock, flags);
1535
1536         if (dev->flags & IFF_PROMISC) {
1537                 GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG,
1538                                  pcrEN | pcrHS | pcrPM);
1539         }
1540
1541 #if 0
1542         /*
1543           FIXME: currently multicast doesn't work - need to get hash table
1544           working first.
1545         */
1546         if (dev->mc_count) {
1547                 // clear hash table
1548                 memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE);
1549                 // Add our ethernet address
1550                 gt96100_add_hash_entry(dev, dev->dev_addr);
1551
1552                 for (mcptr = dev->mc_list; mcptr; mcptr = mcptr->next) {
1553                         dump_hw_addr(2, dev, __FUNCTION__ ": addr=",
1554                                      mcptr->dmi_addr);
1555                         gt96100_add_hash_entry(dev, mcptr->dmi_addr);
1556                 }
1557         }
1558 #endif
1559     
1560         // restart Rx DMA
1561         GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, sdcmrERD);
1562
1563         spin_unlock_irqrestore(&gp->lock, flags);
1564 }
1565
1566 static struct net_device_stats *
1567 gt96100_get_stats(struct net_device *dev)
1568 {
1569         struct gt96100_private *gp = (struct gt96100_private *)dev->priv;
1570         unsigned long flags;
1571
1572         dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);
1573
1574         if (netif_device_present(dev)) {
1575                 spin_lock_irqsave (&gp->lock, flags);
1576                 update_stats(gp);
1577                 spin_unlock_irqrestore (&gp->lock, flags);
1578         }
1579
1580         return &gp->stats;
1581 }
1582
1583 static void gt96100_cleanup_module(void)
1584 {
1585         int i;
1586         for (i=0; i<NUM_INTERFACES; i++) {
1587                 struct gt96100_if_t *gtif = &gt96100_iflist[i];
1588                 if (gtif->dev != NULL) {
1589                         struct gt96100_private *gp =
1590                                 (struct gt96100_private *)gtif->dev->priv;
1591                         release_region(gtif->iobase, gp->io_size);
1592                         unregister_netdev(gtif->dev);
1593                         if (gtif->dev->priv != NULL)
1594                                 kfree (gtif->dev->priv);
1595                         kfree (gtif->dev);
1596                 }
1597         }
1598 }
1599
1600
1601 #ifndef MODULE
1602
1603 static int __init gt96100_setup(char *options)
1604 {
1605         char *this_opt;
1606
1607         if (!options || !*options)
1608                 return 0;
1609
1610         for(this_opt=strtok(options, ",");
1611             this_opt; this_opt=strtok(NULL, ",")) {
1612                 if (!strncmp(this_opt, "mac0:", 5)) {
1613                         memcpy(mac0, this_opt+5, 17);
1614                         mac0[17]= '\0';
1615                 } else if (!strncmp(this_opt, "mac1:", 5)) {
1616                         memcpy(mac1, this_opt+5, 17);
1617                         mac1[17]= '\0';
1618                 }
1619         }
1620
1621         return 1;
1622 }
1623
1624 __setup("gt96100eth=", gt96100_setup);
1625
1626 #endif /* !MODULE */
1627
1628
1629 module_init(gt96100_init_module);
1630 module_exit(gt96100_cleanup_module);
1631
1632 MODULE_AUTHOR("Steve Longerbeam <stevel@mvista.com>");
1633 MODULE_DESCRIPTION("GT96100 Ethernet driver");