2 drivers/net/tulip/interrupt.c
4 Maintained by Jeff Garzik <jgarzik@pobox.com>
5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker.
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
11 Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
12 for more information on this driver, or visit the project
13 Web page at http://sourceforge.net/projects/tulip/
18 #include <linux/config.h>
19 #include <linux/etherdevice.h>
20 #include <linux/pci.h>
/* Driver-wide tunables (set from module parameters elsewhere in the driver):
 * tulip_rx_copybreak       - frames shorter than this are copied into a
 *                            freshly allocated skb instead of flipping the
 *                            ring buffer (see tulip_rx()/tulip_poll());
 * tulip_max_interrupt_work - cap on events handled per hardware interrupt.
 * NOTE(review): this chunk is a partial extract; several original lines
 * (including the table's braces) are missing, so visible tokens are
 * preserved verbatim, stray leading numbers included.
 */
22 int tulip_rx_copybreak;
23 unsigned int tulip_max_interrupt_work;
25 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
27 #define MIT_TABLE 15 /* We use 0 or max */
/* CSR11 hardware interrupt-mitigation values for the 21143.  The poll
 * loop only ever writes index 0 (mitigation off) or MIT_TABLE (max). */
29 unsigned int mit_table[MIT_SIZE+1] =
31 /* CRS11 21143 hardware Mitigation Control Interrupt
32 We use only RX mitigation; other techniques for
35 31 Cycle Size (timer control)
36 30:27 TX timer in 16 * Cycle size
37 26:24 TX No pkts before Int.
38 23:20 RX timer in Cycle size
39 19:17 RX No pkts before Int.
40 16 Continuous Mode (CM)
43 0x0, /* IM disabled */
44 0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */
58 // 0x80FF0000 /* RX time = 16, RX pkts = 7, CM = 1 */
59 0x80F10000 /* RX time = 16, RX pkts = 0, CM = 1 */
/* tulip_refill_rx - replenish empty slots in the Rx descriptor ring.
 * For every dirty ring entry with no skb attached, allocate a PKT_BUF_SZ
 * skb, DMA-map it, point the descriptor's buffer1 at the mapping and hand
 * the descriptor back to the chip by setting DescOwned.
 * NOTE(review): partial extract - the return statement, the local
 * declarations (entry/skb/mapping) and the allocation-failure handling
 * are missing from this view; visible tokens preserved verbatim.
 */
64 int tulip_refill_rx(struct net_device *dev)
66 struct tulip_private *tp = netdev_priv(dev);
70 /* Refill the Rx ring buffers. */
71 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
72 entry = tp->dirty_rx % RX_RING_SIZE;
73 if (tp->rx_buffers[entry].skb == NULL) {
77 skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
81 mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
83 tp->rx_buffers[entry].mapping = mapping;
85 skb->dev = dev; /* Mark as being used by this device. */
86 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
/* Hand the descriptor back to the chip only after buffer1 is set. */
89 tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
/* PNIC (LC82C168) quirk: if the receiver stopped in the "no buffers"
 * state (CSR5 bits 19:17 == 4), a write to CSR2 restarts reception. */
91 if(tp->chip_id == LC82C168) {
92 if(((inl(dev->base_addr + CSR5)>>17)&0x07) == 4) {
93 /* Rx stopped due to out of buffers,
96 outl(0x01, dev->base_addr + CSR2);
102 #ifdef CONFIG_TULIP_NAPI
/* oom_timer - retry timer armed by tulip_poll()'s "oom" path when skb
 * allocation failed.  It simply re-schedules NAPI polling for the device
 * so that the Rx ring refill can be retried; data is the net_device
 * pointer cast to unsigned long (pre-timer_setup era timer callback). */
104 void oom_timer(unsigned long data)
106 struct net_device *dev = (struct net_device *)data;
107 netif_rx_schedule(dev);
/* tulip_poll - NAPI receive poll handler.
 * Drains completed Rx descriptors (bounded by *budget and dev->quota),
 * passing good frames up with netif_receive_skb() (small frames are
 * copied when under tulip_rx_copybreak, larger ones flip the ring skb),
 * refills the ring, and when done removes the device from the poll list
 * and re-enables Rx interrupts via CSR7.  Under
 * CONFIG_TULIP_NAPI_HW_MITIGATION it toggles CSR11 mitigation fully
 * on/off.  If the ring cannot be refilled (skb allocation failure) it
 * takes the "oom" path: arm oom_timer and stop polling WITHOUT
 * re-enabling Rx interrupts.
 * NOTE(review): partial extract - the declarations of skb/received, most
 * closing braces, the do{} opener and the return statements are missing
 * from this view; visible tokens preserved verbatim.
 */
110 int tulip_poll(struct net_device *dev, int *budget)
112 struct tulip_private *tp = netdev_priv(dev);
113 int entry = tp->cur_rx % RX_RING_SIZE;
114 int rx_work_limit = *budget;
117 if (!netif_running(dev))
/* Never process more than the device's current quota in one poll. */
120 if (rx_work_limit > dev->quota)
121 rx_work_limit = dev->quota;
123 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
125 /* that one buffer is needed for mit activation; or might be a
126 bug in the ring buffer code; check later -- JHS*/
128 if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--;
132 printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
133 tp->rx_ring[entry].status);
136 /* Acknowledge current RX interrupt sources. */
137 outl((RxIntr | RxNoBuf), dev->base_addr + CSR5);
140 /* If we own the next entry, it is a new packet. Send it up. */
141 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
142 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
/* Whole ring consumed without a refill - bail out of the loop. */
145 if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
149 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
150 dev->name, entry, status);
151 if (--rx_work_limit < 0)
/* Errored / multi-descriptor frames: account stats and drop. */
154 if ((status & 0x38008300) != 0x0300) {
155 if ((status & 0x38000300) != 0x0300) {
156 /* Ignore earlier buffers. */
157 if ((status & 0xffff) != 0x7fff) {
159 printk(KERN_WARNING "%s: Oversized Ethernet frame "
160 "spanned multiple buffers, status %8.8x!\n",
162 tp->stats.rx_length_errors++;
164 } else if (status & RxDescFatalErr) {
165 /* There was a fatal error. */
167 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
169 tp->stats.rx_errors++; /* end of a packet.*/
170 if (status & 0x0890) tp->stats.rx_length_errors++;
171 if (status & 0x0004) tp->stats.rx_frame_errors++;
172 if (status & 0x0002) tp->stats.rx_crc_errors++;
173 if (status & 0x0001) tp->stats.rx_fifo_errors++;
176 /* Omit the four octet CRC from the length. */
177 short pkt_len = ((status >> 16) & 0x7ff) - 4;
180 #ifndef final_version
181 if (pkt_len > 1518) {
182 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
183 dev->name, pkt_len, pkt_len);
185 tp->stats.rx_length_errors++;
188 /* Check if the packet is long enough to accept without copying
189 to a minimally-sized skbuff. */
190 if (pkt_len < tulip_rx_copybreak
191 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
193 skb_reserve(skb, 2); /* 16 byte align the IP header */
/* Sync the DMA buffer for CPU access before copying out of it. */
194 pci_dma_sync_single_for_cpu(tp->pdev,
195 tp->rx_buffers[entry].mapping,
196 pkt_len, PCI_DMA_FROMDEVICE);
197 #if ! defined(__alpha__)
198 eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
200 skb_put(skb, pkt_len);
202 memcpy(skb_put(skb, pkt_len),
203 tp->rx_buffers[entry].skb->tail,
/* Hand the buffer back to the device after the copy. */
206 pci_dma_sync_single_for_device(tp->pdev,
207 tp->rx_buffers[entry].mapping,
208 pkt_len, PCI_DMA_FROMDEVICE);
209 } else { /* Pass up the skb already on the Rx ring. */
210 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
213 #ifndef final_version
/* Sanity check: descriptor must still point at the mapped buffer. */
214 if (tp->rx_buffers[entry].mapping !=
215 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
216 printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
217 "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
219 le32_to_cpu(tp->rx_ring[entry].buffer1),
220 (unsigned long long)tp->rx_buffers[entry].mapping,
225 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
226 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
228 tp->rx_buffers[entry].skb = NULL;
229 tp->rx_buffers[entry].mapping = 0;
231 skb->protocol = eth_type_trans(skb, dev);
233 netif_receive_skb(skb);
235 dev->last_rx = jiffies;
236 tp->stats.rx_packets++;
237 tp->stats.rx_bytes += pkt_len;
241 entry = (++tp->cur_rx) % RX_RING_SIZE;
/* Opportunistic refill once a quarter of the ring has drained. */
242 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
243 tulip_refill_rx(dev);
247 /* New ack strategy... irq does not ack Rx any longer
248 hopefully this helps */
250 /* Really bad things can happen here... If new packet arrives
251 * and an irq arrives (tx or just due to occasionally unset
252 * mask), it will be acked by irq handler, but new thread
253 * is not scheduled. It is major hole in design.
254 * No idea how to fix this if "playing with fire" will fail
255 * tomorrow (night 011029). If it will not fail, we won
256 * finally: amount of IO did not increase at all. */
257 } while ((inl(dev->base_addr + CSR5) & RxIntr));
261 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
263 /* We use this simplistic scheme for IM. It's proven by
264 real life installations. We can have IM enabled
265 continuously but this would cause unnecessary latency.
266 Unfortunately we can't use all the NET_RX_* feedback here.
267 This would turn on IM for devices that is not contributing
268 to backlog congestion with unnecessary latency.
270 We monitor the device RX-ring and have:
272 HW Interrupt Mitigation either ON or OFF.
274 ON: More than 1 pkt received (per intr.) OR we are dropping
275 OFF: Only 1 pkt received
277 Note. We only use min and max (0, 15) settings from mit_table */
280 if( tp->flags & HAS_INTR_MITIGATION) {
284 outl(mit_table[MIT_TABLE], dev->base_addr + CSR11);
290 outl(0, dev->base_addr + CSR11);
295 #endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
297 dev->quota -= received;
300 tulip_refill_rx(dev);
302 /* If RX ring is not full we are out of memory. */
303 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
305 /* Remove us from polling list and enable RX intr. */
307 netif_rx_complete(dev);
308 outl(tulip_tbl[tp->chip_id].valid_intrs, dev->base_addr+CSR7);
310 /* The last op happens after poll completion. Which means the following:
311 * 1. it can race with disabling irqs in irq handler
312 * 2. it can race with dise/enabling irqs in other poll threads
313 * 3. if an irq raised after beginning loop, it will be immediately
316 * Summarizing: the logic results in some redundant irqs both
317 * due to races in masking and due to too late acking of already
318 * processed irqs. But it must not result in losing events.
326 received = dev->quota; /* Not to happen */
328 dev->quota -= received;
331 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
332 tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
333 tulip_refill_rx(dev);
335 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
340 oom: /* Executed with RX ints disabled */
343 /* Start timer, stop polling, but do not enable rx interrupts. */
344 mod_timer(&tp->oom_timer, jiffies+1);
346 /* Think: timer_pending() was an explicit signature of bug.
347 * Timer can be pending now but fired and completed
348 * before we did netif_rx_complete(). See? We would lose it. */
350 /* remove ourselves from the polling list */
351 netif_rx_complete(dev);
356 #else /* CONFIG_TULIP_NAPI */
/* tulip_rx - non-NAPI receive path (compiled when CONFIG_TULIP_NAPI is
 * off); called from the interrupt handler.  Walks the Rx ring while the
 * chip has released descriptors (DescOwned clear), accounting error
 * frames and delivering good ones: frames under tulip_rx_copybreak are
 * copied into a fresh skb, larger frames flip the ring skb up the stack.
 * Work is bounded by the number of dirty ring slots.
 * NOTE(review): partial extract - the declarations of skb/received,
 * netif_rx() delivery, closing braces and the return statement are
 * missing from this view; visible tokens preserved verbatim.
 * NOTE(review): the mismatch printk here uses "%Lx" with a (long long)
 * cast while the NAPI copy uses "%08llx"/(unsigned long long) -
 * harmless but inconsistent; runtime string left untouched.
 */
358 static int tulip_rx(struct net_device *dev)
360 struct tulip_private *tp = netdev_priv(dev);
361 int entry = tp->cur_rx % RX_RING_SIZE;
/* Limit work to the number of outstanding (dirty) ring entries. */
362 int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
366 printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
367 tp->rx_ring[entry].status);
368 /* If we own the next entry, it is a new packet. Send it up. */
369 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
370 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
373 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
374 dev->name, entry, status);
375 if (--rx_work_limit < 0)
/* Errored / multi-descriptor frames: account stats and drop. */
377 if ((status & 0x38008300) != 0x0300) {
378 if ((status & 0x38000300) != 0x0300) {
379 /* Ignore earlier buffers. */
380 if ((status & 0xffff) != 0x7fff) {
382 printk(KERN_WARNING "%s: Oversized Ethernet frame "
383 "spanned multiple buffers, status %8.8x!\n",
385 tp->stats.rx_length_errors++;
387 } else if (status & RxDescFatalErr) {
388 /* There was a fatal error. */
390 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
392 tp->stats.rx_errors++; /* end of a packet.*/
393 if (status & 0x0890) tp->stats.rx_length_errors++;
394 if (status & 0x0004) tp->stats.rx_frame_errors++;
395 if (status & 0x0002) tp->stats.rx_crc_errors++;
396 if (status & 0x0001) tp->stats.rx_fifo_errors++;
399 /* Omit the four octet CRC from the length. */
400 short pkt_len = ((status >> 16) & 0x7ff) - 4;
403 #ifndef final_version
404 if (pkt_len > 1518) {
405 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
406 dev->name, pkt_len, pkt_len);
408 tp->stats.rx_length_errors++;
412 /* Check if the packet is long enough to accept without copying
413 to a minimally-sized skbuff. */
414 if (pkt_len < tulip_rx_copybreak
415 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
417 skb_reserve(skb, 2); /* 16 byte align the IP header */
/* Sync the DMA buffer for CPU access before copying out of it. */
418 pci_dma_sync_single_for_cpu(tp->pdev,
419 tp->rx_buffers[entry].mapping,
420 pkt_len, PCI_DMA_FROMDEVICE);
421 #if ! defined(__alpha__)
422 eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
424 skb_put(skb, pkt_len);
426 memcpy(skb_put(skb, pkt_len),
427 tp->rx_buffers[entry].skb->tail,
/* Hand the buffer back to the device after the copy. */
430 pci_dma_sync_single_for_device(tp->pdev,
431 tp->rx_buffers[entry].mapping,
432 pkt_len, PCI_DMA_FROMDEVICE);
433 } else { /* Pass up the skb already on the Rx ring. */
434 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
437 #ifndef final_version
/* Sanity check: descriptor must still point at the mapped buffer. */
438 if (tp->rx_buffers[entry].mapping !=
439 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
440 printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
441 "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
443 le32_to_cpu(tp->rx_ring[entry].buffer1),
444 (long long)tp->rx_buffers[entry].mapping,
449 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
450 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
452 tp->rx_buffers[entry].skb = NULL;
453 tp->rx_buffers[entry].mapping = 0;
455 skb->protocol = eth_type_trans(skb, dev);
459 dev->last_rx = jiffies;
460 tp->stats.rx_packets++;
461 tp->stats.rx_bytes += pkt_len;
464 entry = (++tp->cur_rx) % RX_RING_SIZE;
468 #endif /* CONFIG_TULIP_NAPI */
/* phy_interrupt - service a PHY/link-status interrupt on boards flagged
 * HAS_PHY_IRQ (called first from tulip_interrupt).  Reads the low byte
 * of CSR12 and compares it with the cached shadow copy; on change it
 * writes the ack bit, updates the shadow, rechecks duplex under
 * tp->lock, then clears the ack bit.  Returns nonzero when it handled
 * something (return statements not visible in this partial extract).
 */
470 static inline unsigned int phy_interrupt (struct net_device *dev)
473 int csr12 = inl(dev->base_addr + CSR12) & 0xff;
474 struct tulip_private *tp = netdev_priv(dev);
476 if (csr12 != tp->csr12_shadow) {
/* ack interrupt */
478 outl(csr12 | 0x02, dev->base_addr + CSR12);
479 tp->csr12_shadow = csr12;
480 /* do link change stuff */
481 spin_lock(&tp->lock);
482 tulip_check_duplex(dev);
483 spin_unlock(&tp->lock);
484 /* clear irq ack bit */
485 outl(csr12 & ~0x02, dev->base_addr + CSR12);
494 /* The interrupt handler does all of the Rx thread work and cleans up
495 after the Tx thread. */
496 irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
498 struct net_device *dev = (struct net_device *)dev_instance;
499 struct tulip_private *tp = netdev_priv(dev);
500 long ioaddr = dev->base_addr;
506 int maxrx = RX_RING_SIZE;
507 int maxtx = TX_RING_SIZE;
508 int maxoi = TX_RING_SIZE;
509 #ifdef CONFIG_TULIP_NAPI
514 unsigned int work_count = tulip_max_interrupt_work;
515 unsigned int handled = 0;
517 /* Let's see whether the interrupt really is for us */
518 csr5 = inl(ioaddr + CSR5);
520 if (tp->flags & HAS_PHY_IRQ)
521 handled = phy_interrupt (dev);
523 if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
524 return IRQ_RETVAL(handled);
530 #ifdef CONFIG_TULIP_NAPI
532 if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
534 /* Mask RX intrs and add the device to poll list. */
535 outl(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
536 netif_rx_schedule(dev);
538 if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
542 /* Acknowledge the interrupt sources we handle here ASAP
543 the poll function does Rx and RxNoBuf acking */
545 outl(csr5 & 0x0001ff3f, ioaddr + CSR5);
548 /* Acknowledge all of the current interrupt sources ASAP. */
549 outl(csr5 & 0x0001ffff, ioaddr + CSR5);
552 if (csr5 & (RxIntr | RxNoBuf)) {
554 tulip_refill_rx(dev);
557 #endif /* CONFIG_TULIP_NAPI */
560 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
561 dev->name, csr5, inl(dev->base_addr + CSR5));
564 if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
565 unsigned int dirty_tx;
567 spin_lock(&tp->lock);
569 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
571 int entry = dirty_tx % TX_RING_SIZE;
572 int status = le32_to_cpu(tp->tx_ring[entry].status);
575 break; /* It still has not been Txed */
577 /* Check for Rx filter setup frames. */
578 if (tp->tx_buffers[entry].skb == NULL) {
579 /* test because dummy frames not mapped */
580 if (tp->tx_buffers[entry].mapping)
581 pci_unmap_single(tp->pdev,
582 tp->tx_buffers[entry].mapping,
583 sizeof(tp->setup_frame),
588 if (status & 0x8000) {
589 /* There was an major error, log it. */
590 #ifndef final_version
592 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
595 tp->stats.tx_errors++;
596 if (status & 0x4104) tp->stats.tx_aborted_errors++;
597 if (status & 0x0C00) tp->stats.tx_carrier_errors++;
598 if (status & 0x0200) tp->stats.tx_window_errors++;
599 if (status & 0x0002) tp->stats.tx_fifo_errors++;
600 if ((status & 0x0080) && tp->full_duplex == 0)
601 tp->stats.tx_heartbeat_errors++;
603 tp->stats.tx_bytes +=
604 tp->tx_buffers[entry].skb->len;
605 tp->stats.collisions += (status >> 3) & 15;
606 tp->stats.tx_packets++;
609 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
610 tp->tx_buffers[entry].skb->len,
613 /* Free the original skb. */
614 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
615 tp->tx_buffers[entry].skb = NULL;
616 tp->tx_buffers[entry].mapping = 0;
620 #ifndef final_version
621 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
622 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
623 dev->name, dirty_tx, tp->cur_tx);
624 dirty_tx += TX_RING_SIZE;
628 if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
629 netif_wake_queue(dev);
631 tp->dirty_tx = dirty_tx;
634 printk(KERN_WARNING "%s: The transmitter stopped."
635 " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
636 dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
637 tulip_restart_rxtx(tp);
639 spin_unlock(&tp->lock);
643 if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
644 if (csr5 == 0xffffffff)
646 if (csr5 & TxJabber) tp->stats.tx_errors++;
647 if (csr5 & TxFIFOUnderflow) {
648 if ((tp->csr6 & 0xC000) != 0xC000)
649 tp->csr6 += 0x4000; /* Bump up the Tx threshold */
651 tp->csr6 |= 0x00200000; /* Store-n-forward. */
652 /* Restart the transmit process. */
653 tulip_restart_rxtx(tp);
654 outl(0, ioaddr + CSR1);
656 if (csr5 & (RxDied | RxNoBuf)) {
657 if (tp->flags & COMET_MAC_ADDR) {
658 outl(tp->mc_filter[0], ioaddr + 0xAC);
659 outl(tp->mc_filter[1], ioaddr + 0xB0);
662 if (csr5 & RxDied) { /* Missed a Rx frame. */
663 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
664 tp->stats.rx_errors++;
665 tulip_start_rxtx(tp);
668 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
669 * call is ever done under the spinlock
671 if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
673 (tp->link_change)(dev, csr5);
675 if (csr5 & SytemError) {
676 int error = (csr5 >> 23) & 7;
677 /* oops, we hit a PCI error. The code produced corresponds
682 * Note that on parity error, we should do a software reset
683 * of the chip to get it back into a sane state (according
684 * to the 21142/3 docs that is).
687 printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
688 dev->name, tp->nir, error);
690 /* Clear all error sources, included undocumented ones! */
691 outl(0x0800f7ba, ioaddr + CSR5);
694 if (csr5 & TimerInt) {
697 printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
699 outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
703 if (tx > maxtx || rx > maxrx || oi > maxoi) {
705 printk(KERN_WARNING "%s: Too much work during an interrupt, "
706 "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);
708 /* Acknowledge all interrupt sources. */
709 outl(0x8001ffff, ioaddr + CSR5);
710 if (tp->flags & HAS_INTR_MITIGATION) {
711 /* Josip Loncaric at ICASE did extensive experimentation
712 to develop a good interrupt mitigation setting.*/
713 outl(0x8b240000, ioaddr + CSR11);
714 } else if (tp->chip_id == LC82C168) {
715 /* the LC82C168 doesn't have a hw timer.*/
716 outl(0x00, ioaddr + CSR7);
717 mod_timer(&tp->timer, RUN_AT(HZ/50));
719 /* Mask all interrupting sources, set timer to
721 outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
722 outl(0x0012, ioaddr + CSR11);
731 csr5 = inl(ioaddr + CSR5);
733 #ifdef CONFIG_TULIP_NAPI
736 } while ((csr5 & (TxNoBuf |
747 } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
749 tulip_refill_rx(dev);
751 /* check if the card is in suspend mode */
752 entry = tp->dirty_rx % RX_RING_SIZE;
753 if (tp->rx_buffers[entry].skb == NULL) {
755 printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
756 if (tp->chip_id == LC82C168) {
757 outl(0x00, ioaddr + CSR7);
758 mod_timer(&tp->timer, RUN_AT(HZ/50));
760 if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
762 printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
763 outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
765 outl(TimerInt, ioaddr + CSR5);
766 outl(12, ioaddr + CSR11);
771 #endif /* CONFIG_TULIP_NAPI */
773 if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
774 tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
778 printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
779 dev->name, inl(ioaddr + CSR5));