2 drivers/net/tulip/interrupt.c
4 Maintained by Jeff Garzik <jgarzik@pobox.com>
5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker.
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
11 Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
12 for more information on this driver, or visit the project
13 Web page at http://sourceforge.net/projects/tulip/
18 #include <linux/config.h>
19 #include <linux/etherdevice.h>
20 #include <linux/pci.h>
/* Packets shorter than this are copied into a freshly allocated skb on
   receive instead of passing the ring buffer up the stack (see the
   pkt_len < tulip_rx_copybreak test in tulip_rx()). */
23 int tulip_rx_copybreak;
/* Upper bound on work done per invocation of tulip_interrupt()
   (loaded into work_count there). */
24 unsigned int tulip_max_interrupt_work;
26 #ifdef CONFIG_NET_HW_FLOWCONTROL
/* Table of CSR11 interrupt-mitigation register values; entry 0 disables
   mitigation.  NOTE(review): MIT_SIZE and the intermediate entries are
   defined outside this excerpt -- confirm indexing against the full file. */
29 unsigned int mit_table[MIT_SIZE+1] =
31 /* CSR11 21143 hardware Mitigation Control Interrupt
32 We use only RX mitigation; other techniques are used for
35 31 Cycle Size (timer control)
36 30:27 TX timer in 16 * Cycle size
37 26:24 TX No pkts before Int.
38 23:20 RX timer in Cycle size
39 19:17 RX No pkts before Int.
40 16 Continuous Mode (CM) */
43 0x0, /* IM disabled */
44 0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */
58 // 0x80FF0000 /* RX time = 16, RX pkts = 7, CM = 1 */
59 0x80F10000 /* RX time = 16, RX pkts = 0, CM = 1 */
/*
 * Replenish the Rx ring: for every slot between dirty_rx and cur_rx that
 * has no skb, allocate a fresh PKT_BUF_SZ buffer, map it for DMA, and hand
 * the descriptor back to the chip by setting DescOwned.
 * NOTE(review): this is a sampled excerpt -- the allocation-failure path
 * and the function's return value fall outside the visible lines.
 */
64 int tulip_refill_rx(struct net_device *dev)
66 struct tulip_private *tp = (struct tulip_private *)dev->priv;
70 /* Refill the Rx ring buffers. */
71 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
72 entry = tp->dirty_rx % RX_RING_SIZE;
73 if (tp->rx_buffers[entry].skb == NULL) {
77 skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
/* Map the new buffer for DMA and remember the bus address so it can
   be unmapped later in tulip_rx(). */
81 mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
83 tp->rx_buffers[entry].mapping = mapping;
85 skb->dev = dev; /* Mark as being used by this device. */
86 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
/* Give ownership back to the chip only after buffer1 is filled in. */
89 tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
91 // if(tp->chip_id == LC82C168) {
92 // if(((inl(dev->base_addr + CSR5)>>17)&0x07) == 4) {
93 // /* Rx stopped due to out of buffers,
96 // outl(0x01, dev->base_addr + CSR2);
/*
 * Receive-path worker: drains completed descriptors from the Rx ring,
 * accounts errors, and hands good packets to the network stack.  Called
 * from tulip_interrupt().
 * NOTE(review): sampled excerpt -- many original lines (braces, else arms,
 * the 'received' counter updates) are not visible here.
 */
103 static int tulip_rx(struct net_device *dev)
105 struct tulip_private *tp = (struct tulip_private *)dev->priv;
106 int entry = tp->cur_rx % RX_RING_SIZE;
/* Number of descriptors we may consume before yielding back to the ISR. */
107 int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
110 #ifdef CONFIG_NET_HW_FLOWCONTROL
111 int drop = 0, mit_sel = 0;
113 /* one buffer is needed for mitigation activation; or this might be a
114 bug in the ring buffer code; check later -- JHS*/
116 if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--;
120 // if (tulip_debug > 4)
121 // printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
122 // tp->rx_ring[entry].status);
124 /* If we own the next entry, it is a new packet. Send it up. */
125 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
126 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
129 // if (tulip_debug > 5)
130 // printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
131 // dev->name, entry, status);
133 if (--rx_work_limit < 0)
/* Hardware status masks; presumably per the 21143 descriptor-status
   layout (error summary / first+last descriptor bits) -- TODO confirm
   against the datasheet. */
135 if ((status & 0x38008300) != 0x0300) {
136 if ((status & 0x38000300) != 0x0300) {
137 /* Ignore earlier buffers. */
138 if ((status & 0xffff) != 0x7fff) {
140 // if (tulip_debug > 1)
141 // printk(KERN_WARNING "%s: Oversized Ethernet frame "
142 // "spanned multiple buffers, status %8.8x!\n",
143 // dev->name, status);
144 tp->stats.rx_length_errors++;
146 } else if (status & RxDescFatalErr) {
148 /* There was a fatal error. */
149 // if (tulip_debug > 2)
150 // printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
151 // dev->name, status);
152 tp->stats.rx_errors++; /* end of a packet.*/
153 if (status & 0x0890) tp->stats.rx_length_errors++;
154 if (status & 0x0004) tp->stats.rx_frame_errors++;
155 if (status & 0x0002) tp->stats.rx_crc_errors++;
156 if (status & 0x0001) tp->stats.rx_fifo_errors++;
159 /* Omit the four octet CRC from the length. */
160 short pkt_len = ((status >> 16) & 0x7ff) - 4;
163 #ifndef final_version
164 if (pkt_len > 1518) {
165 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
166 dev->name, pkt_len, pkt_len);
168 tp->stats.rx_length_errors++;
172 #ifdef CONFIG_NET_HW_FLOWCONTROL
173 drop = atomic_read(&netdev_dropping);
177 /* Check if the packet is long enough to accept without copying
178 to a minimally-sized skbuff. */
179 if (pkt_len < tulip_rx_copybreak
180 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
182 skb_reserve(skb, 2); /* 16 byte align the IP header */
183 pci_dma_sync_single(tp->pdev,
184 tp->rx_buffers[entry].mapping,
185 pkt_len, PCI_DMA_FROMDEVICE);
186 #if ! defined(__alpha__)
187 eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
189 skb_put(skb, pkt_len);
191 memcpy(skb_put(skb, pkt_len),
192 tp->rx_buffers[entry].skb->tail,
195 } else { /* Pass up the skb already on the Rx ring. */
196 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
199 //#ifndef final_version
200 // if (tp->rx_buffers[entry].mapping !=
201 // le32_to_cpu(tp->rx_ring[entry].buffer1)) {
202 // printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
203 // "do not match in tulip_rx: %08x vs. %08x %p / %p.\n",
205 // le32_to_cpu(tp->rx_ring[entry].buffer1),
206 // tp->rx_buffers[entry].mapping,
/* Large packet: unmap and detach the ring skb; tulip_refill_rx() will
   allocate a replacement. */
211 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
212 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
214 tp->rx_buffers[entry].skb = NULL;
215 tp->rx_buffers[entry].mapping = 0;
217 skb->protocol = eth_type_trans(skb, dev);
218 #ifdef CONFIG_NET_HW_FLOWCONTROL
223 #ifdef CONFIG_NET_HW_FLOWCONTROL
231 rx_work_limit -= NET_RX_CN_HIGH; /* additional*/
237 printk("unknown feedback return code %d\n", mit_sel);
241 drop = atomic_read(&netdev_dropping);
245 mit_sel = NET_RX_DROP;
248 long ioaddr = dev->base_addr;
250 /* disable Rx & RxNoBuf ints. */
251 outl(tulip_tbl[tp->chip_id].valid_intrs&RX_A_NBF_STOP, ioaddr + CSR7);
252 set_bit(tp->fc_bit, &netdev_fc_xoff);
256 dev->last_rx = jiffies;
257 tp->stats.rx_packets++;
258 tp->stats.rx_bytes += pkt_len;
261 entry = (++tp->cur_rx) % RX_RING_SIZE;
263 #ifdef CONFIG_NET_HW_FLOWCONTROL
265 /* We use this simplistic scheme for IM. It's proven by
266 real life installations. We can have IM enabled
267 continuously but this would cause unnecessary latency.
268 Unfortunately we can't use all the NET_RX_* feedback here.
269 This would turn on IM for devices that are not contributing
270 to backlog congestion with unnecessary latency.
272 We monitor the device RX-ring and have:
274 HW Interrupt Mitigation either ON or OFF.
276 ON: More than 1 pkt received (per intr.) OR we are dropping
277 OFF: Only 1 pkt received
279 Note. We only use min and max (0, 15) settings from mit_table */
282 if( tp->flags & HAS_INTR_MITIGATION) {
283 if((received > 1 || mit_sel == NET_RX_DROP)
284 && tp->mit_sel != 15 ) {
286 tp->mit_change = 1; /* Force IM change */
288 if((received <= 1 && mit_sel != NET_RX_DROP) && tp->mit_sel != 0 ) {
290 tp->mit_change = 1; /* Force IM change */
/* Sentinel return: RX_RING_SIZE+1 is "maxrx+1", distinguishing this
   flow-control path from a normal received-packet count. */
294 return RX_RING_SIZE+1; /* maxrx+1 */
/*
 * Service a PHY/link interrupt on chips that report link status through
 * CSR12: when the low byte of CSR12 changes, ack the interrupt bit (0x02),
 * cache the new value, and re-check duplex under the driver lock.
 */
300 static inline void phy_interrupt (struct net_device *dev)
303 int csr12 = inl(dev->base_addr + CSR12) & 0xff;
304 struct tulip_private *tp = (struct tulip_private *)dev->priv;
/* Only act when CSR12 actually differs from the last value we saw. */
306 if (csr12 != tp->csr12_shadow) {
308 outl(csr12 | 0x02, dev->base_addr + CSR12);
309 tp->csr12_shadow = csr12;
310 /* do link change stuff */
311 spin_lock(&tp->lock);
312 tulip_check_duplex(dev);
313 spin_unlock(&tp->lock);
314 /* clear irq ack bit */
315 outl(csr12 & ~0x02, dev->base_addr + CSR12);
320 /* The interrupt handler does all of the Rx thread work and cleans up
321 after the Tx thread.
   NOTE(review): sampled excerpt -- the enclosing do{}while loop header,
   several braces and the rx/tx/oi counter updates are outside the
   visible lines. */
322 void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
324 struct net_device *dev = (struct net_device *)dev_instance;
325 struct tulip_private *tp = (struct tulip_private *)dev->priv;
326 long ioaddr = dev->base_addr;
/* Per-invocation work budgets for Rx, Tx and "other" interrupt events. */
333 int maxrx = RX_RING_SIZE;
334 int maxtx = TX_RING_SIZE;
335 int maxoi = TX_RING_SIZE;
336 unsigned int work_count = tulip_max_interrupt_work;
338 /* Let's see whether the interrupt really is for us */
339 csr5 = inl(ioaddr + CSR5);
341 if (tp->flags & HAS_PHY_IRQ)
344 if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
350 /* Acknowledge all of the current interrupt sources ASAP. */
351 outl(csr5 & 0x0001ffff, ioaddr + CSR5);
353 // if (tulip_debug > 4)
354 // printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
355 // dev->name, csr5, inl(dev->base_addr + CSR5));
/* Rx work: drain the ring (tulip_rx elsewhere) and refill buffers. */
357 if (csr5 & (RxIntr | RxNoBuf)) {
358 #ifdef CONFIG_NET_HW_FLOWCONTROL
360 (!test_bit(tp->fc_bit, &netdev_fc_xoff)))
363 tulip_refill_rx(dev);
/* Tx completion: reclaim finished descriptors under tp->lock. */
366 if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
367 unsigned int dirty_tx;
369 spin_lock(&tp->lock);
371 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
373 int entry = dirty_tx % TX_RING_SIZE;
374 int status = le32_to_cpu(tp->tx_ring[entry].status);
377 break; /* It still has not been Txed */
379 /* Check for Rx filter setup frames. */
380 if (tp->tx_buffers[entry].skb == NULL) {
381 /* test because dummy frames not mapped */
382 if (tp->tx_buffers[entry].mapping)
383 pci_unmap_single(tp->pdev,
384 tp->tx_buffers[entry].mapping,
385 sizeof(tp->setup_frame),
390 if (status & 0x8000) {
391 /* There was a major error, log it. */
393 //#ifndef final_version
394 // if (tulip_debug > 1)
395 // printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
396 // dev->name, status);
398 tp->stats.tx_errors++;
399 if (status & 0x4104) tp->stats.tx_aborted_errors++;
400 if (status & 0x0C00) tp->stats.tx_carrier_errors++;
401 if (status & 0x0200) tp->stats.tx_window_errors++;
402 if (status & 0x0002) tp->stats.tx_fifo_errors++;
403 if ((status & 0x0080) && tp->full_duplex == 0)
404 tp->stats.tx_heartbeat_errors++;
406 tp->stats.tx_bytes +=
407 tp->tx_buffers[entry].skb->len;
408 tp->stats.collisions += (status >> 3) & 15;
409 tp->stats.tx_packets++;
412 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
413 tp->tx_buffers[entry].skb->len,
416 /* Free the original skb. */
417 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
418 tp->tx_buffers[entry].skb = NULL;
419 tp->tx_buffers[entry].mapping = 0;
423 #ifndef final_version
424 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
425 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
426 dev->name, dirty_tx, tp->cur_tx);
427 dirty_tx += TX_RING_SIZE;
/* Restart the queue once at least two Tx slots are free again. */
431 if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
432 netif_wake_queue(dev);
434 tp->dirty_tx = dirty_tx;
437 // if (tulip_debug > 2)
438 // printk(KERN_WARNING "%s: The transmitter stopped."
439 // " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
440 // dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
441 tulip_restart_rxtx(tp);
443 spin_unlock(&tp->lock);
447 if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
/* 0xffffffff from CSR5 means the card has been removed/unresponsive. */
448 if (csr5 == 0xffffffff)
450 if (csr5 & TxJabber) tp->stats.tx_errors++;
451 if (csr5 & TxFIFOUnderflow) {
452 if ((tp->csr6 & 0xC000) != 0xC000)
453 tp->csr6 += 0x4000; /* Bump up the Tx threshold */
455 tp->csr6 |= 0x00200000; /* Store-n-forward. */
456 /* Restart the transmit process. */
457 tulip_restart_rxtx(tp);
458 outl(0, ioaddr + CSR1);
460 if (csr5 & (RxDied | RxNoBuf)) {
461 if (tp->flags & COMET_MAC_ADDR) {
462 outl(tp->mc_filter[0], ioaddr + 0xAC);
463 outl(tp->mc_filter[1], ioaddr + 0xB0);
466 if (csr5 & RxDied) { /* Missed a Rx frame. */
467 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
468 #ifdef CONFIG_NET_HW_FLOWCONTROL
469 if (tp->fc_bit && !test_bit(tp->fc_bit, &netdev_fc_xoff)) {
470 tp->stats.rx_errors++;
471 tulip_start_rxtx(tp);
474 tp->stats.rx_errors++;
475 tulip_start_rxtx(tp);
// 479 NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
// 480 call is ever done under the spinlock
482 if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
484 (tp->link_change)(dev, csr5);
486 if (csr5 & SytemError) {
487 int error = (csr5 >> 23) & 7;
488 /* oops, we hit a PCI error. The code produced corresponds
493 * Note that on parity error, we should do a software reset
494 * of the chip to get it back into a sane state (according
495 * to the 21142/3 docs that is). */
498 printk(KERN_ERR "%s: (%lu) System Error occured (%d)\n",
499 dev->name, tp->nir, error);
501 /* Clear all error sources, including undocumented ones! */
502 outl(0x0800f7ba, ioaddr + CSR5);
505 if (csr5 & TimerInt) {
507 // if (tulip_debug > 2)
508 // printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
510 #ifdef CONFIG_NET_HW_FLOWCONTROL
511 if (tp->fc_bit && (test_bit(tp->fc_bit, &netdev_fc_xoff)))
512 if (net_ratelimit()) printk("BUG!! enabling interupt when FC off (timerintr.) \n");
514 outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
/* Work budget exceeded: throttle by masking sources and arming a timer. */
518 if (tx > maxtx || rx > maxrx || oi > maxoi) {
520 // if (tulip_debug > 1)
521 // printk(KERN_WARNING "%s: Too much work during an interrupt, "
522 // "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);
524 /* Acknowledge all interrupt sources. */
525 outl(0x8001ffff, ioaddr + CSR5);
526 if (tp->flags & HAS_INTR_MITIGATION) {
527 #ifdef CONFIG_NET_HW_FLOWCONTROL
529 outl(mit_table[tp->mit_sel], ioaddr + CSR11);
533 /* Josip Loncaric at ICASE did extensive experimentation
534 to develop a good interrupt mitigation setting.*/
535 outl(0x8b240000, ioaddr + CSR11);
537 } else if (tp->chip_id == LC82C168) {
538 /* the LC82C168 doesn't have a hw timer.*/
539 outl(0x00, ioaddr + CSR7);
540 mod_timer(&tp->timer, RUN_AT(HZ/50));
542 /* Mask all interrupting sources, set timer to re-enable them. */
544 #ifndef CONFIG_NET_HW_FLOWCONTROL
545 outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
546 outl(0x0012, ioaddr + CSR11);
/* Re-read CSR5 and loop while any interrupt condition remains. */
556 csr5 = inl(ioaddr + CSR5);
557 } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
559 tulip_refill_rx(dev);
561 /* check if the card is in suspend mode */
562 entry = tp->dirty_rx % RX_RING_SIZE;
563 // if (tp->rx_buffers[entry].skb == NULL) {
565 // if (tulip_debug > 1)
566 // printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
567 // if (tp->chip_id == LC82C168) {
568 // outl(0x00, ioaddr + CSR7);
569 // mod_timer(&tp->timer, RUN_AT(HZ/50));
571 // if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
573 // if (tulip_debug > 1)
574 // printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
575 // outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
577 // outl(TimerInt, ioaddr + CSR5);
578 // outl(12, ioaddr + CSR11);
/* Fold the hardware missed-frame counter (CSR8) into the drop stats. */
584 if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
585 tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
589 // if (tulip_debug > 4)
590 // printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
591 // dev->name, inl(ioaddr + CSR5));