make oldconfig will rebuild these...
[linux-2.4.21-pre4.git] / drivers / net / tulip / interrupt.c
1 /*
2         drivers/net/tulip/interrupt.c
3
4         Maintained by Jeff Garzik <jgarzik@pobox.com>
5         Copyright 2000,2001  The Linux Kernel Team
6         Written/copyright 1994-2001 by Donald Becker.
7
8         This software may be used and distributed according to the terms
9         of the GNU General Public License, incorporated herein by reference.
10
11         Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
12         for more information on this driver, or visit the project
13         Web page at http://sourceforge.net/projects/tulip/
14
15 */
16
17 #include "tulip.h"
18 #include <linux/config.h>
19 #include <linux/etherdevice.h>
20 #include <linux/pci.h>
21
22
23 int tulip_rx_copybreak;
24 unsigned int tulip_max_interrupt_work;
25
26 #ifdef CONFIG_NET_HW_FLOWCONTROL
27
28 #define MIT_SIZE 15
29 unsigned int mit_table[MIT_SIZE+1] =
30 {
31         /*  CRS11 21143 hardware Mitigation Control Interrupt
32             We use only RX mitigation we other techniques for
33             TX intr. mitigation.
34
35            31    Cycle Size (timer control)
36            30:27 TX timer in 16 * Cycle size
37            26:24 TX No pkts before Int.
38            23:20 RX timer in Cycle size
39            19:17 RX No pkts before Int.
40            16       Continues Mode (CM)
41         */
42
43         0x0,             /* IM disabled */
44         0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
45         0x80150000,
46         0x80270000,
47         0x80370000,
48         0x80490000,
49         0x80590000,
50         0x80690000,
51         0x807B0000,
52         0x808B0000,
53         0x809D0000,
54         0x80AD0000,
55         0x80BD0000,
56         0x80CF0000,
57         0x80DF0000,
58 //       0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
59         0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
60 };
61 #endif
62
63
64 int tulip_refill_rx(struct net_device *dev)
65 {
66         struct tulip_private *tp = (struct tulip_private *)dev->priv;
67         int entry;
68         int refilled = 0;
69
70         /* Refill the Rx ring buffers. */
71         for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
72                 entry = tp->dirty_rx % RX_RING_SIZE;
73                 if (tp->rx_buffers[entry].skb == NULL) {
74                         struct sk_buff *skb;
75                         dma_addr_t mapping;
76
77                         skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
78                         if (skb == NULL)
79                                 break;
80
81                         mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
82                                                  PCI_DMA_FROMDEVICE);
83                         tp->rx_buffers[entry].mapping = mapping;
84
85                         skb->dev = dev;                 /* Mark as being used by this device. */
86                         tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
87                         refilled++;
88                 }
89                 tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
90         }
91 //      if(tp->chip_id == LC82C168) {
92 //              if(((inl(dev->base_addr + CSR5)>>17)&0x07) == 4) {
93 //                      /* Rx stopped due to out of buffers,
94 //                       * restart it
95 //                       */
96 //                      outl(0x01, dev->base_addr + CSR2);
97 //              }
98 //      }
99         return refilled;
100 }
101
102
103 static int tulip_rx(struct net_device *dev)
104 {
105         struct tulip_private *tp = (struct tulip_private *)dev->priv;
106         int entry = tp->cur_rx % RX_RING_SIZE;
107         int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
108         int received = 0;
109
110 #ifdef CONFIG_NET_HW_FLOWCONTROL
111         int drop = 0, mit_sel = 0;
112
113 /* that one buffer is needed for mit activation; or might be a
114    bug in the ring buffer code; check later -- JHS*/
115
116         if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--;
117 #endif
118
119 //jackl
120 //      if (tulip_debug > 4)
121 //              printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
122 //                         tp->rx_ring[entry].status);
123
124         /* If we own the next entry, it is a new packet. Send it up. */
125         while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
126                 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
127
128 //jackl
129 //              if (tulip_debug > 5)
130 //                      printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
131 //                                 dev->name, entry, status);
132
133                 if (--rx_work_limit < 0)
134                         break;
135                 if ((status & 0x38008300) != 0x0300) {
136                         if ((status & 0x38000300) != 0x0300) {
137                                 /* Ingore earlier buffers. */
138                                 if ((status & 0xffff) != 0x7fff) {
139 //jackl                                 
140 //                                      if (tulip_debug > 1)
141 //                                              printk(KERN_WARNING "%s: Oversized Ethernet frame "
142 //                                                         "spanned multiple buffers, status %8.8x!\n",
143 //                                                         dev->name, status);
144                                         tp->stats.rx_length_errors++;
145                                 }
146                         } else if (status & RxDescFatalErr) {
147 //jackl                         
148                                 /* There was a fatal error. */
149 //                              if (tulip_debug > 2)
150 //                                      printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
151 //                                                 dev->name, status);
152                                 tp->stats.rx_errors++; /* end of a packet.*/
153                                 if (status & 0x0890) tp->stats.rx_length_errors++;
154                                 if (status & 0x0004) tp->stats.rx_frame_errors++;
155                                 if (status & 0x0002) tp->stats.rx_crc_errors++;
156                                 if (status & 0x0001) tp->stats.rx_fifo_errors++;
157                         }
158                 } else {
159                         /* Omit the four octet CRC from the length. */
160                         short pkt_len = ((status >> 16) & 0x7ff) - 4;
161                         struct sk_buff *skb;
162
163 #ifndef final_version
164                         if (pkt_len > 1518) {
165                                 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
166                                            dev->name, pkt_len, pkt_len);
167                                 pkt_len = 1518;
168                                 tp->stats.rx_length_errors++;
169                         }
170 #endif
171
172 #ifdef CONFIG_NET_HW_FLOWCONTROL
173                         drop = atomic_read(&netdev_dropping);
174                         if (drop)
175                                 goto throttle;
176 #endif
177                         /* Check if the packet is long enough to accept without copying
178                            to a minimally-sized skbuff. */
179                         if (pkt_len < tulip_rx_copybreak
180                                 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
181                                 skb->dev = dev;
182                                 skb_reserve(skb, 2);    /* 16 byte align the IP header */
183                                 pci_dma_sync_single(tp->pdev,
184                                                     tp->rx_buffers[entry].mapping,
185                                                     pkt_len, PCI_DMA_FROMDEVICE);
186 #if ! defined(__alpha__)
187                                 eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
188                                                  pkt_len, 0);
189                                 skb_put(skb, pkt_len);
190 #else
191                                 memcpy(skb_put(skb, pkt_len),
192                                        tp->rx_buffers[entry].skb->tail,
193                                        pkt_len);
194 #endif
195                         } else {        /* Pass up the skb already on the Rx ring. */
196                                 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
197                                                      pkt_len);
198                                         
199 //#ifndef final_version
200 //                              if (tp->rx_buffers[entry].mapping !=
201 //                                  le32_to_cpu(tp->rx_ring[entry].buffer1)) {
202 //                                      printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
203 //                                             "do not match in tulip_rx: %08x vs. %08x %p / %p.\n",
204 //                                             dev->name,
205 //                                             le32_to_cpu(tp->rx_ring[entry].buffer1),
206 //                                             tp->rx_buffers[entry].mapping,
207 //                                             skb->head, temp);
208 //                              }
209 //#endif
210
211                                 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
212                                                  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
213
214                                 tp->rx_buffers[entry].skb = NULL;
215                                 tp->rx_buffers[entry].mapping = 0;
216                         }
217                         skb->protocol = eth_type_trans(skb, dev);
218 #ifdef CONFIG_NET_HW_FLOWCONTROL
219                         mit_sel =
220 #endif
221                         netif_rx(skb);
222
223 #ifdef CONFIG_NET_HW_FLOWCONTROL
224                         switch (mit_sel) {
225                         case NET_RX_SUCCESS:
226                         case NET_RX_CN_LOW:
227                         case NET_RX_CN_MOD:
228                                 break;
229
230                         case NET_RX_CN_HIGH:
231                                 rx_work_limit -= NET_RX_CN_HIGH; /* additional*/
232                                 break;
233                         case NET_RX_DROP:
234                                 rx_work_limit = -1;
235                                 break;
236                         default:
237                                 printk("unknown feedback return code %d\n", mit_sel);
238                                 break;
239                         }
240
241                         drop = atomic_read(&netdev_dropping);
242                         if (drop) {
243 throttle:
244                                 rx_work_limit = -1;
245                                 mit_sel = NET_RX_DROP;
246
247                                 if (tp->fc_bit) {
248                                         long ioaddr = dev->base_addr;
249
250                                         /* disable Rx & RxNoBuf ints. */
251                                         outl(tulip_tbl[tp->chip_id].valid_intrs&RX_A_NBF_STOP, ioaddr + CSR7);
252                                         set_bit(tp->fc_bit, &netdev_fc_xoff);
253                                 }
254                         }
255 #endif
256                         dev->last_rx = jiffies;
257                         tp->stats.rx_packets++;
258                         tp->stats.rx_bytes += pkt_len;
259                 }
260                 received++;
261                 entry = (++tp->cur_rx) % RX_RING_SIZE;
262         }
263 #ifdef CONFIG_NET_HW_FLOWCONTROL
264
265         /* We use this simplistic scheme for IM. It's proven by
266            real life installations. We can have IM enabled
267            continuesly but this would cause unnecessary latency.
268            Unfortunely we can't use all the NET_RX_* feedback here.
269            This would turn on IM for devices that is not contributing
270            to backlog congestion with unnecessary latency.
271
272            We monitor the device RX-ring and have:
273
274            HW Interrupt Mitigation either ON or OFF.
275
276            ON:  More then 1 pkt received (per intr.) OR we are dropping
277            OFF: Only 1 pkt received
278
279            Note. We only use min and max (0, 15) settings from mit_table */
280
281
282         if( tp->flags &  HAS_INTR_MITIGATION) {
283                 if((received > 1 || mit_sel == NET_RX_DROP)
284                    && tp->mit_sel != 15 ) {
285                         tp->mit_sel = 15;
286                         tp->mit_change = 1; /* Force IM change */
287                 }
288                 if((received <= 1 && mit_sel != NET_RX_DROP) && tp->mit_sel != 0 ) {
289                         tp->mit_sel = 0;
290                         tp->mit_change = 1; /* Force IM change */
291                 }
292         }
293
294         return RX_RING_SIZE+1; /* maxrx+1 */
295 #else
296         return received;
297 #endif
298 }
299
300 static inline void phy_interrupt (struct net_device *dev)
301 {
302 #ifdef __hppa__
303         int csr12 = inl(dev->base_addr + CSR12) & 0xff;
304         struct tulip_private *tp = (struct tulip_private *)dev->priv;
305
306         if (csr12 != tp->csr12_shadow) {
307                 /* ack interrupt */
308                 outl(csr12 | 0x02, dev->base_addr + CSR12);
309                 tp->csr12_shadow = csr12;
310                 /* do link change stuff */
311                 spin_lock(&tp->lock);
312                 tulip_check_duplex(dev);
313                 spin_unlock(&tp->lock);
314                 /* clear irq ack bit */
315                 outl(csr12 & ~0x02, dev->base_addr + CSR12);
316         }
317 #endif
318 }
319
320 /* The interrupt handler does all of the Rx thread work and cleans up
321    after the Tx thread. */
322 void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
323 {
324         struct net_device *dev = (struct net_device *)dev_instance;
325         struct tulip_private *tp = (struct tulip_private *)dev->priv;
326         long ioaddr = dev->base_addr;
327         int csr5;
328         int entry;
329         int missed;
330         int rx = 0;
331         int tx = 0;
332         int oi = 0;
333         int maxrx = RX_RING_SIZE;
334         int maxtx = TX_RING_SIZE;
335         int maxoi = TX_RING_SIZE;
336         unsigned int work_count = tulip_max_interrupt_work;
337
338         /* Let's see whether the interrupt really is for us */
339         csr5 = inl(ioaddr + CSR5);
340
341         if (tp->flags & HAS_PHY_IRQ)
342                 phy_interrupt (dev);
343     
344         if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
345                 return;
346
347         tp->nir++;
348
349         do {
350                 /* Acknowledge all of the current interrupt sources ASAP. */
351                 outl(csr5 & 0x0001ffff, ioaddr + CSR5);
352 //jackl
353 //              if (tulip_debug > 4)
354 //                      printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
355 //                                 dev->name, csr5, inl(dev->base_addr + CSR5));
356
357                 if (csr5 & (RxIntr | RxNoBuf)) {
358 #ifdef CONFIG_NET_HW_FLOWCONTROL
359                         if ((!tp->fc_bit) ||
360                             (!test_bit(tp->fc_bit, &netdev_fc_xoff)))
361 #endif
362                                 rx += tulip_rx(dev);
363                         tulip_refill_rx(dev);
364                 }
365
366                 if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
367                         unsigned int dirty_tx;
368
369                         spin_lock(&tp->lock);
370
371                         for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
372                                  dirty_tx++) {
373                                 int entry = dirty_tx % TX_RING_SIZE;
374                                 int status = le32_to_cpu(tp->tx_ring[entry].status);
375
376                                 if (status < 0)
377                                         break;                  /* It still has not been Txed */
378
379                                 /* Check for Rx filter setup frames. */
380                                 if (tp->tx_buffers[entry].skb == NULL) {
381                                         /* test because dummy frames not mapped */
382                                         if (tp->tx_buffers[entry].mapping)
383                                                 pci_unmap_single(tp->pdev,
384                                                          tp->tx_buffers[entry].mapping,
385                                                          sizeof(tp->setup_frame),
386                                                          PCI_DMA_TODEVICE);
387                                         continue;
388                                 }
389
390                                 if (status & 0x8000) {
391                                         /* There was an major error, log it. */
392 //jackl
393 //#ifndef final_version
394 //                                      if (tulip_debug > 1)
395 //                                              printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
396 //                                                         dev->name, status);
397 //#endif
398                                         tp->stats.tx_errors++;
399                                         if (status & 0x4104) tp->stats.tx_aborted_errors++;
400                                         if (status & 0x0C00) tp->stats.tx_carrier_errors++;
401                                         if (status & 0x0200) tp->stats.tx_window_errors++;
402                                         if (status & 0x0002) tp->stats.tx_fifo_errors++;
403                                         if ((status & 0x0080) && tp->full_duplex == 0)
404                                                 tp->stats.tx_heartbeat_errors++;
405                                 } else {
406                                         tp->stats.tx_bytes +=
407                                                 tp->tx_buffers[entry].skb->len;
408                                         tp->stats.collisions += (status >> 3) & 15;
409                                         tp->stats.tx_packets++;
410                                 }
411
412                                 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
413                                                  tp->tx_buffers[entry].skb->len,
414                                                  PCI_DMA_TODEVICE);
415
416                                 /* Free the original skb. */
417                                 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
418                                 tp->tx_buffers[entry].skb = NULL;
419                                 tp->tx_buffers[entry].mapping = 0;
420                                 tx++;
421                         }
422
423 #ifndef final_version
424                         if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
425                                 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
426                                            dev->name, dirty_tx, tp->cur_tx);
427                                 dirty_tx += TX_RING_SIZE;
428                         }
429 #endif
430
431                         if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
432                                 netif_wake_queue(dev);
433
434                         tp->dirty_tx = dirty_tx;
435                         if (csr5 & TxDied) {
436 //jackl                         
437 //                              if (tulip_debug > 2)
438 //                                      printk(KERN_WARNING "%s: The transmitter stopped."
439 //                                                 "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
440 //                                                 dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
441                                 tulip_restart_rxtx(tp);
442                         }
443                         spin_unlock(&tp->lock);
444                 }
445
446                 /* Log errors. */
447                 if (csr5 & AbnormalIntr) {      /* Abnormal error summary bit. */
448                         if (csr5 == 0xffffffff)
449                                 break;
450                         if (csr5 & TxJabber) tp->stats.tx_errors++;
451                         if (csr5 & TxFIFOUnderflow) {
452                                 if ((tp->csr6 & 0xC000) != 0xC000)
453                                         tp->csr6 += 0x4000;     /* Bump up the Tx threshold */
454                                 else
455                                         tp->csr6 |= 0x00200000;  /* Store-n-forward. */
456                                 /* Restart the transmit process. */
457                                 tulip_restart_rxtx(tp);
458                                 outl(0, ioaddr + CSR1);
459                         }
460                         if (csr5 & (RxDied | RxNoBuf)) {
461                                 if (tp->flags & COMET_MAC_ADDR) {
462                                         outl(tp->mc_filter[0], ioaddr + 0xAC);
463                                         outl(tp->mc_filter[1], ioaddr + 0xB0);
464                                 }
465                         }
466                         if (csr5 & RxDied) {            /* Missed a Rx frame. */
467                                 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
468 #ifdef CONFIG_NET_HW_FLOWCONTROL
469                                 if (tp->fc_bit && !test_bit(tp->fc_bit, &netdev_fc_xoff)) {
470                                         tp->stats.rx_errors++;
471                                         tulip_start_rxtx(tp);
472                                 }
473 #else
474                                 tp->stats.rx_errors++;
475                                 tulip_start_rxtx(tp);
476 #endif
477                         }
478                         /*
479                          * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
480                          * call is ever done under the spinlock
481                          */
482                         if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
483                                 if (tp->link_change)
484                                         (tp->link_change)(dev, csr5);
485                         }
486                         if (csr5 & SytemError) {
487                                 int error = (csr5 >> 23) & 7;
488                                 /* oops, we hit a PCI error.  The code produced corresponds
489                                  * to the reason:
490                                  *  0 - parity error
491                                  *  1 - master abort
492                                  *  2 - target abort
493                                  * Note that on parity error, we should do a software reset
494                                  * of the chip to get it back into a sane state (according
495                                  * to the 21142/3 docs that is).
496                                  *   -- rmk
497                                  */
498                                 printk(KERN_ERR "%s: (%lu) System Error occured (%d)\n",
499                                         dev->name, tp->nir, error);
500                         }
501                         /* Clear all error sources, included undocumented ones! */
502                         outl(0x0800f7ba, ioaddr + CSR5);
503                         oi++;
504                 }
505                 if (csr5 & TimerInt) {
506 //jackl
507 //                      if (tulip_debug > 2)
508 //                              printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
509 //                                         dev->name, csr5);
510 #ifdef CONFIG_NET_HW_FLOWCONTROL
511                         if (tp->fc_bit && (test_bit(tp->fc_bit, &netdev_fc_xoff)))
512                           if (net_ratelimit()) printk("BUG!! enabling interupt when FC off (timerintr.) \n");
513 #endif
514                         outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
515                         tp->ttimer = 0;
516                         oi++;
517                 }
518                 if (tx > maxtx || rx > maxrx || oi > maxoi) {
519 //jackl                 
520 //                      if (tulip_debug > 1)
521 //                              printk(KERN_WARNING "%s: Too much work during an interrupt, "
522 //                                         "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);
523
524                        /* Acknowledge all interrupt sources. */
525                         outl(0x8001ffff, ioaddr + CSR5);
526                         if (tp->flags & HAS_INTR_MITIGATION) {
527 #ifdef CONFIG_NET_HW_FLOWCONTROL
528                                 if(tp->mit_change) {
529                                         outl(mit_table[tp->mit_sel], ioaddr + CSR11);
530                                         tp->mit_change = 0;
531                                 }
532 #else
533                      /* Josip Loncaric at ICASE did extensive experimentation
534                         to develop a good interrupt mitigation setting.*/
535                                 outl(0x8b240000, ioaddr + CSR11);
536 #endif
537                         } else if (tp->chip_id == LC82C168) {
538                                 /* the LC82C168 doesn't have a hw timer.*/
539                                 outl(0x00, ioaddr + CSR7);
540                                 mod_timer(&tp->timer, RUN_AT(HZ/50));
541                         } else {
542                           /* Mask all interrupting sources, set timer to
543                                 re-enable. */
544 #ifndef CONFIG_NET_HW_FLOWCONTROL
545                                 outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
546                                 outl(0x0012, ioaddr + CSR11);
547 #endif
548                         }
549                         break;
550                 }
551
552                 work_count--;
553                 if (work_count == 0)
554                         break;
555
556                 csr5 = inl(ioaddr + CSR5);
557         } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
558
559         tulip_refill_rx(dev);
560
561         /* check if the card is in suspend mode */
562         entry = tp->dirty_rx % RX_RING_SIZE;
563 //      if (tp->rx_buffers[entry].skb == NULL) {
564 //jackl         
565 //              if (tulip_debug > 1)
566 //                      printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
567 //              if (tp->chip_id == LC82C168) {
568 //                      outl(0x00, ioaddr + CSR7);
569 //                      mod_timer(&tp->timer, RUN_AT(HZ/50));
570 //              } else {
571 //                      if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
572 //jackl
573 //                              if (tulip_debug > 1)
574 //                                      printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
575 //                              outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
576 //                                      ioaddr + CSR7);
577 //                              outl(TimerInt, ioaddr + CSR5);
578 //                              outl(12, ioaddr + CSR11);
579 //                              tp->ttimer = 1;
580 //                      }
581 //              }
582 //      }
583
584         if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
585                 tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
586         }
587
588 //jackl
589 //      if (tulip_debug > 4)
590 //              printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
591 //                         dev->name, inl(ioaddr + CSR5));
592
593 }