/*
        drivers/net/tulip/interrupt.c

        Maintained by Jeff Garzik <jgarzik@pobox.com>
        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.

        This software may be used and distributed according to the terms
        of the GNU General Public License, incorporated herein by reference.

        Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html}
        for more information on this driver, or visit the project
        Web page at http://sourceforge.net/projects/tulip/

*/
16
17 #include "tulip.h"
18 #include <linux/config.h>
19 #include <linux/etherdevice.h>
20 #include <linux/pci.h>
21
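/* Tunables for the receive path: packets shorter than tulip_rx_copybreak
   are copied into a freshly allocated skb instead of handing up the ring
   buffer, and tulip_max_interrupt_work bounds how many events a single
   interrupt may service.  Both are set from module parameters elsewhere
   in the driver. */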
int tulip_rx_copybreak;
unsigned int tulip_max_interrupt_work;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

unsigned int mit_table[MIT_SIZE+1] =
{
        /*  CSR11: 21143 hardware Interrupt Mitigation Control.
            We use only RX mitigation; other techniques are used for
            TX interrupt mitigation.

           31    Cycle Size (timer control)
           30:27 TX timer in 16 * Cycle size
           26:24 TX No pkts before Int.
           23:20 RX timer in Cycle size
           19:17 RX No pkts before Int.
           16    Continuous Mode (CM)
        */
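        /* Worked example against the layout above: 0x80150000 has bit 31
           set (cycle size), RX timer field (bits 23:20) = 1, RX packet
           field (bits 19:17) = 2, and CM (bit 16) = 1, matching the
           "RX time = 1, RX pkts = 2, CM = 1" annotation below. */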

        0x0,             /* IM disabled */
        0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
        0x80150000,
        0x80270000,
        0x80370000,
        0x80490000,
        0x80590000,
        0x80690000,
        0x807B0000,
        0x808B0000,
        0x809D0000,
        0x80AD0000,
        0x80BD0000,
        0x80CF0000,
        0x80DF0000,
//      0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
        0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif

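/* Top up the Rx ring: allocate a fresh skb for every ring slot that has
   none, map it for DMA, and hand the descriptor back to the chip.
   Returns the number of buffers refilled. */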
int tulip_refill_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry;
        int refilled = 0;

        /* Refill the Rx ring buffers. */
        for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
                entry = tp->dirty_rx % RX_RING_SIZE;
                if (tp->rx_buffers[entry].skb == NULL) {
                        struct sk_buff *skb;
                        dma_addr_t mapping;

                        skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
                        if (skb == NULL)
                                break;

                        mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
                                                 PCI_DMA_FROMDEVICE);
                        tp->rx_buffers[entry].mapping = mapping;

                        skb->dev = dev;                 /* Mark as being used by this device. */
                        tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
                        refilled++;
                }
                tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
        }
        if (tp->chip_id == LC82C168) {
                if (((inl(dev->base_addr + CSR5)>>17)&0x07) == 4) {
                        /* Rx stopped because it ran out of buffers;
                         * restart it.
                         */
                        outl(0x01, dev->base_addr + CSR2);
                }
        }
        return refilled;
}

#ifdef CONFIG_TULIP_NAPI

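/* Out-of-memory timer: fired after a failed Rx refill, to put the device
   back on the poll list once buffer allocation may succeed again. */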
void oom_timer(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        netif_rx_schedule(dev);
}

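/* NAPI poll routine: runs with Rx interrupts masked and drains the Rx
   ring, honoring the *budget/quota limits imposed by the core. */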
int tulip_poll(struct net_device *dev, int *budget)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry = tp->cur_rx % RX_RING_SIZE;
        int rx_work_limit = *budget;
        int received = 0;

        if (!netif_running(dev))
                goto done;

        if (rx_work_limit > dev->quota)
                rx_work_limit = dev->quota;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS */

        if (rx_work_limit >= RX_RING_SIZE)
                rx_work_limit--;
#endif

        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
                       tp->rx_ring[entry].status);

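        /* Poll loop: ack the Rx interrupt sources, then pass packets up
           while descriptors are owned by the CPU, stopping at the budget. */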
        do {
                /* Acknowledge current RX interrupt sources. */
                outl((RxIntr | RxNoBuf), dev->base_addr + CSR5);

                /* If we own the next entry, it is a new packet. Send it up. */
                while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                        s32 status = le32_to_cpu(tp->rx_ring[entry].status);

                        if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                                break;

                        if (tulip_debug > 5)
                                printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                       dev->name, entry, status);
                        if (--rx_work_limit < 0)
                                goto not_done;

                        if ((status & 0x38008300) != 0x0300) {
                                if ((status & 0x38000300) != 0x0300) {
                                        /* Ignore earlier buffers. */
                                        if ((status & 0xffff) != 0x7fff) {
                                                if (tulip_debug > 1)
                                                        printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                               "spanned multiple buffers, status %8.8x!\n",
                                                               dev->name, status);
                                                tp->stats.rx_length_errors++;
                                        }
                                } else if (status & RxDescFatalErr) {
                                        /* There was a fatal error. */
                                        if (tulip_debug > 2)
                                                printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                                       dev->name, status);
                                        tp->stats.rx_errors++; /* end of a packet.*/
                                        if (status & 0x0890) tp->stats.rx_length_errors++;
                                        if (status & 0x0004) tp->stats.rx_frame_errors++;
                                        if (status & 0x0002) tp->stats.rx_crc_errors++;
                                        if (status & 0x0001) tp->stats.rx_fifo_errors++;
                                }
                        } else {
                                /* Omit the four octet CRC from the length. */
                                short pkt_len = ((status >> 16) & 0x7ff) - 4;
                                struct sk_buff *skb;

#ifndef final_version
                                if (pkt_len > 1518) {
                                        printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                                               dev->name, pkt_len, pkt_len);
                                        pkt_len = 1518;
                                        tp->stats.rx_length_errors++;
                                }
#endif
                                /* Check if the packet is long enough to accept without copying
                                   to a minimally-sized skbuff. */
                                if (pkt_len < tulip_rx_copybreak
                                    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                        skb->dev = dev;
                                        skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                        pci_dma_sync_single_for_cpu(tp->pdev,
                                                                    tp->rx_buffers[entry].mapping,
                                                                    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                        eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
                                                         pkt_len, 0);
                                        skb_put(skb, pkt_len);
#else
                                        memcpy(skb_put(skb, pkt_len),
                                               tp->rx_buffers[entry].skb->tail,
                                               pkt_len);
#endif
                                        pci_dma_sync_single_for_device(tp->pdev,
                                                                       tp->rx_buffers[entry].mapping,
                                                                       pkt_len, PCI_DMA_FROMDEVICE);
                                } else {        /* Pass up the skb already on the Rx ring. */
                                        char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                             pkt_len);

#ifndef final_version
                                        if (tp->rx_buffers[entry].mapping !=
                                            le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                                printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                                       "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                                                       dev->name,
                                                       le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                       (unsigned long long)tp->rx_buffers[entry].mapping,
                                                       skb->head, temp);
                                        }
#endif

                                        pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                         PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                        tp->rx_buffers[entry].skb = NULL;
                                        tp->rx_buffers[entry].mapping = 0;
                                }
                                skb->protocol = eth_type_trans(skb, dev);

                                netif_receive_skb(skb);

                                dev->last_rx = jiffies;
                                tp->stats.rx_packets++;
                                tp->stats.rx_bytes += pkt_len;
                        }
                        received++;

                        entry = (++tp->cur_rx) % RX_RING_SIZE;
                        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                                tulip_refill_rx(dev);

                }

                /* New ack strategy... the irq handler does not ack Rx any
                   longer; hopefully this helps. */

                /* Really bad things can happen here... If a new packet arrives
                 * and an irq arrives (tx or just due to an occasionally unset
                 * mask), it will be acked by the irq handler, but the new
                 * thread is not scheduled. It is a major hole in the design.
                 * No idea how to fix this if "playing with fire" fails
                 * tomorrow (night 011029). If it does not fail, we have won:
                 * the amount of IO did not increase at all. */
        } while ((inl(dev->base_addr + CSR5) & RxIntr));

done:

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

        /* We use this simplistic scheme for IM. It's proven by
           real life installations. We could have IM enabled
           continuously, but this would cause unnecessary latency.
           Unfortunately we can't use all the NET_RX_* feedback here.
           That would turn on IM for devices that are not contributing
           to backlog congestion, with unnecessary latency.

           We monitor the device RX ring and have:

           HW Interrupt Mitigation either ON or OFF.

           ON:  more than 1 pkt received (per intr.) OR we are dropping
           OFF: only 1 pkt received

           Note: we only use the min and max (0, 15) settings from mit_table. */

        if (tp->flags & HAS_INTR_MITIGATION) {
                if (received > 1) {
                        if (!tp->mit_on) {
                                tp->mit_on = 1;
                                outl(mit_table[MIT_TABLE], dev->base_addr + CSR11);
                        }
                } else {
                        if (tp->mit_on) {
                                tp->mit_on = 0;
                                outl(0, dev->base_addr + CSR11);
                        }
                }
        }


#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

        dev->quota -= received;
        *budget -= received;

        tulip_refill_rx(dev);

        /* If the RX ring is not full, we are out of memory. */
        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        /* Remove us from the polling list and enable RX intr. */

        netif_rx_complete(dev);
        outl(tulip_tbl[tp->chip_id].valid_intrs, dev->base_addr+CSR7);

        /* The last op happens after poll completion, which means the following:
         * 1. it can race with disabling irqs in the irq handler
         * 2. it can race with disabling/enabling irqs in other poll threads
         * 3. if an irq is raised after the beginning of the loop, it will be
         *    immediately triggered here.
         *
         * Summarizing: the logic results in some redundant irqs both
         * due to races in masking and due to too late acking of already
         * processed irqs. But it must not result in losing events.
         */

        return 0;

not_done:
        if (!received) {
                received = dev->quota; /* Should not happen. */
        }
        dev->quota -= received;
        *budget -= received;

        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
            tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                tulip_refill_rx(dev);

        if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                goto oom;

        return 1;


oom:    /* Executed with RX ints disabled */

        /* Start the timer and stop polling, but do not enable rx interrupts. */
        mod_timer(&tp->oom_timer, jiffies+1);

        /* Think: timer_pending() was an explicit signature of a bug.
         * The timer can be pending now, but fired and completed
         * before we did netif_rx_complete(). See? We would lose it. */

        /* remove ourselves from the polling list */
        netif_rx_complete(dev);

        return 0;
}

#else /* CONFIG_TULIP_NAPI */

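/* Non-NAPI receive path: called from the interrupt handler to drain the
   Rx ring directly.  Returns the number of packets passed up. */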
static int tulip_rx(struct net_device *dev)
{
        struct tulip_private *tp = netdev_priv(dev);
        int entry = tp->cur_rx % RX_RING_SIZE;
        int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
        int received = 0;

        if (tulip_debug > 4)
                printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
                       tp->rx_ring[entry].status);
        /* If we own the next entry, it is a new packet. Send it up. */
        while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                s32 status = le32_to_cpu(tp->rx_ring[entry].status);

                if (tulip_debug > 5)
                        printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                               dev->name, entry, status);
                if (--rx_work_limit < 0)
                        break;
                if ((status & 0x38008300) != 0x0300) {
                        if ((status & 0x38000300) != 0x0300) {
                                /* Ignore earlier buffers. */
                                if ((status & 0xffff) != 0x7fff) {
                                        if (tulip_debug > 1)
                                                printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                       "spanned multiple buffers, status %8.8x!\n",
                                                       dev->name, status);
                                        tp->stats.rx_length_errors++;
                                }
                        } else if (status & RxDescFatalErr) {
                                /* There was a fatal error. */
                                if (tulip_debug > 2)
                                        printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                               dev->name, status);
                                tp->stats.rx_errors++; /* end of a packet.*/
                                if (status & 0x0890) tp->stats.rx_length_errors++;
                                if (status & 0x0004) tp->stats.rx_frame_errors++;
                                if (status & 0x0002) tp->stats.rx_crc_errors++;
                                if (status & 0x0001) tp->stats.rx_fifo_errors++;
                        }
                } else {
                        /* Omit the four octet CRC from the length. */
                        short pkt_len = ((status >> 16) & 0x7ff) - 4;
                        struct sk_buff *skb;

#ifndef final_version
                        if (pkt_len > 1518) {
                                printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                                       dev->name, pkt_len, pkt_len);
                                pkt_len = 1518;
                                tp->stats.rx_length_errors++;
                        }
#endif

                        /* Check if the packet is long enough to accept without copying
                           to a minimally-sized skbuff. */
                        if (pkt_len < tulip_rx_copybreak
                            && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                skb->dev = dev;
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(tp->pdev,
                                                            tp->rx_buffers[entry].mapping,
                                                            pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
                                                 pkt_len, 0);
                                skb_put(skb, pkt_len);
#else
                                memcpy(skb_put(skb, pkt_len),
                                       tp->rx_buffers[entry].skb->tail,
                                       pkt_len);
#endif
                                pci_dma_sync_single_for_device(tp->pdev,
                                                               tp->rx_buffers[entry].mapping,
                                                               pkt_len, PCI_DMA_FROMDEVICE);
                        } else {        /* Pass up the skb already on the Rx ring. */
                                char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                     pkt_len);

#ifndef final_version
                                if (tp->rx_buffers[entry].mapping !=
                                    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                        printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                               "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
                                               dev->name,
                                               le32_to_cpu(tp->rx_ring[entry].buffer1),
                                               (long long)tp->rx_buffers[entry].mapping,
                                               skb->head, temp);
                                }
#endif

                                pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                tp->rx_buffers[entry].skb = NULL;
                                tp->rx_buffers[entry].mapping = 0;
                        }
                        skb->protocol = eth_type_trans(skb, dev);

                        netif_rx(skb);

                        dev->last_rx = jiffies;
                        tp->stats.rx_packets++;
                        tp->stats.rx_bytes += pkt_len;
                }
                received++;
                entry = (++tp->cur_rx) % RX_RING_SIZE;
        }
        return received;
}
#endif  /* CONFIG_TULIP_NAPI */

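/* On HPPA boards the PHY reports its interrupt through CSR12: ack it,
   shadow the new value, and let tulip_check_duplex() react to any
   link change. */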
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
        int csr12 = inl(dev->base_addr + CSR12) & 0xff;
        struct tulip_private *tp = netdev_priv(dev);

        if (csr12 != tp->csr12_shadow) {
                /* ack interrupt */
                outl(csr12 | 0x02, dev->base_addr + CSR12);
                tp->csr12_shadow = csr12;
                /* do link change stuff */
                spin_lock(&tp->lock);
                tulip_check_duplex(dev);
                spin_unlock(&tp->lock);
                /* clear irq ack bit */
                outl(csr12 & ~0x02, dev->base_addr + CSR12);

                return 1;
        }
#endif

        return 0;
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
        struct net_device *dev = (struct net_device *)dev_instance;
        struct tulip_private *tp = netdev_priv(dev);
        long ioaddr = dev->base_addr;
        int csr5;
        int missed;
        int rx = 0;
        int tx = 0;
        int oi = 0;
        int maxrx = RX_RING_SIZE;
        int maxtx = TX_RING_SIZE;
        int maxoi = TX_RING_SIZE;
#ifdef CONFIG_TULIP_NAPI
        int rxd = 0;
#else
        int entry;
#endif
        unsigned int work_count = tulip_max_interrupt_work;
        unsigned int handled = 0;

        /* Let's see whether the interrupt really is for us */
        csr5 = inl(ioaddr + CSR5);

        if (tp->flags & HAS_PHY_IRQ)
                handled = phy_interrupt (dev);

        if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
                return IRQ_RETVAL(handled);

        tp->nir++;

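        /* Service events until CSR5 reports nothing more to do, or the
           per-interrupt work limit is exhausted. */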
        do {

#ifdef CONFIG_TULIP_NAPI

                if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
                        rxd++;
                        /* Mask RX intrs and add the device to the poll list. */
                        outl(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
                        netif_rx_schedule(dev);

                        if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
                                break;
                }

                /* Acknowledge the interrupt sources we handle here ASAP;
                   the poll function does the Rx and RxNoBuf acking. */

                outl(csr5 & 0x0001ff3f, ioaddr + CSR5);

#else
                /* Acknowledge all of the current interrupt sources ASAP. */
                outl(csr5 & 0x0001ffff, ioaddr + CSR5);


                if (csr5 & (RxIntr | RxNoBuf)) {
                        rx += tulip_rx(dev);
                        tulip_refill_rx(dev);
                }

#endif /*  CONFIG_TULIP_NAPI */

                if (tulip_debug > 4)
                        printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
                               dev->name, csr5, inl(dev->base_addr + CSR5));

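                /* Reclaim completed Tx descriptors: account errors or
                   byte/packet counts, unmap the DMA buffers, and free
                   the skbs. */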
                if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
                        unsigned int dirty_tx;

                        spin_lock(&tp->lock);

                        for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
                             dirty_tx++) {
                                int entry = dirty_tx % TX_RING_SIZE;
                                int status = le32_to_cpu(tp->tx_ring[entry].status);

                                if (status < 0)
                                        break;                  /* It still has not been Txed */

                                /* Check for Rx filter setup frames. */
                                if (tp->tx_buffers[entry].skb == NULL) {
                                        /* test because dummy frames are not mapped */
                                        if (tp->tx_buffers[entry].mapping)
                                                pci_unmap_single(tp->pdev,
                                                         tp->tx_buffers[entry].mapping,
                                                         sizeof(tp->setup_frame),
                                                         PCI_DMA_TODEVICE);
                                        continue;
                                }

                                if (status & 0x8000) {
                                        /* There was a major error; log it. */
#ifndef final_version
                                        if (tulip_debug > 1)
                                                printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
                                                       dev->name, status);
#endif
                                        tp->stats.tx_errors++;
                                        if (status & 0x4104) tp->stats.tx_aborted_errors++;
                                        if (status & 0x0C00) tp->stats.tx_carrier_errors++;
                                        if (status & 0x0200) tp->stats.tx_window_errors++;
                                        if (status & 0x0002) tp->stats.tx_fifo_errors++;
                                        if ((status & 0x0080) && tp->full_duplex == 0)
                                                tp->stats.tx_heartbeat_errors++;
                                } else {
                                        tp->stats.tx_bytes +=
                                                tp->tx_buffers[entry].skb->len;
                                        tp->stats.collisions += (status >> 3) & 15;
                                        tp->stats.tx_packets++;
                                }

                                pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
                                                 tp->tx_buffers[entry].skb->len,
                                                 PCI_DMA_TODEVICE);

                                /* Free the original skb. */
                                dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
                                tp->tx_buffers[entry].skb = NULL;
                                tp->tx_buffers[entry].mapping = 0;
                                tx++;
                        }

#ifndef final_version
                        if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
                                printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
                                       dev->name, dirty_tx, tp->cur_tx);
                                dirty_tx += TX_RING_SIZE;
                        }
#endif

                        if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
                                netif_wake_queue(dev);

                        tp->dirty_tx = dirty_tx;
                        if (csr5 & TxDied) {
                                if (tulip_debug > 2)
                                        printk(KERN_WARNING "%s: The transmitter stopped."
                                               "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
                                               dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
                                tulip_restart_rxtx(tp);
                        }
                        spin_unlock(&tp->lock);
                }

                /* Log errors. */
                if (csr5 & AbnormalIntr) {      /* Abnormal error summary bit. */
                        if (csr5 == 0xffffffff)
                                break;
                        if (csr5 & TxJabber) tp->stats.tx_errors++;
                        if (csr5 & TxFIFOUnderflow) {
                                if ((tp->csr6 & 0xC000) != 0xC000)
                                        tp->csr6 += 0x4000;     /* Bump up the Tx threshold */
                                else
                                        tp->csr6 |= 0x00200000;  /* Store-n-forward. */
                                /* Restart the transmit process. */
                                tulip_restart_rxtx(tp);
                                outl(0, ioaddr + CSR1);
                        }
                        if (csr5 & (RxDied | RxNoBuf)) {
                                if (tp->flags & COMET_MAC_ADDR) {
                                        outl(tp->mc_filter[0], ioaddr + 0xAC);
                                        outl(tp->mc_filter[1], ioaddr + 0xB0);
                                }
                        }
                        if (csr5 & RxDied) {            /* Missed a Rx frame. */
                                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
                                tp->stats.rx_errors++;
                                tulip_start_rxtx(tp);
                        }
                        /*
                         * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
                         * call is ever done under the spinlock
                         */
                        if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
                                if (tp->link_change)
                                        (tp->link_change)(dev, csr5);
                        }
                        if (csr5 & SytemError) {
                                int error = (csr5 >> 23) & 7;
                                /* oops, we hit a PCI error.  The code produced corresponds
                                 * to the reason:
                                 *  0 - parity error
                                 *  1 - master abort
                                 *  2 - target abort
                                 * Note that on parity error, we should do a software reset
                                 * of the chip to get it back into a sane state (according
                                 * to the 21142/3 docs that is).
                                 *   -- rmk
                                 */
                                printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
                                        dev->name, tp->nir, error);
                        }
                        /* Clear all error sources, including undocumented ones! */
                        outl(0x0800f7ba, ioaddr + CSR5);
                        oi++;
                }
                if (csr5 & TimerInt) {

                        if (tulip_debug > 2)
                                printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
                                       dev->name, csr5);
                        outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
                        tp->ttimer = 0;
                        oi++;
                }
                if (tx > maxtx || rx > maxrx || oi > maxoi) {
                        if (tulip_debug > 1)
                                printk(KERN_WARNING "%s: Too much work during an interrupt, "
                                       "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);

                        /* Acknowledge all interrupt sources. */
                        outl(0x8001ffff, ioaddr + CSR5);
                        if (tp->flags & HAS_INTR_MITIGATION) {
                                /* Josip Loncaric at ICASE did extensive experimentation
                                   to develop a good interrupt mitigation setting. */
                                outl(0x8b240000, ioaddr + CSR11);
                        } else if (tp->chip_id == LC82C168) {
                                /* the LC82C168 doesn't have a hw timer. */
                                outl(0x00, ioaddr + CSR7);
                                mod_timer(&tp->timer, RUN_AT(HZ/50));
                        } else {
                                /* Mask all interrupt sources, and set the timer to
                                   re-enable them. */
                                outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
                                outl(0x0012, ioaddr + CSR11);
                        }
                        break;
                }

                work_count--;
                if (work_count == 0)
                        break;

                csr5 = inl(ioaddr + CSR5);

#ifdef CONFIG_TULIP_NAPI
                if (rxd)
                        csr5 &= ~RxPollInt;
        } while ((csr5 & (TxNoBuf |
                          TxDied |
                          TxIntr |
                          TimerInt |
                          /* Abnormal intr. */
                          RxDied |
                          TxFIFOUnderflow |
                          TxJabber |
                          TPLnkFail |
                          SytemError )) != 0);
#else
        } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);

        tulip_refill_rx(dev);

        /* check if the card is in suspend mode */
        entry = tp->dirty_rx % RX_RING_SIZE;
        if (tp->rx_buffers[entry].skb == NULL) {
                if (tulip_debug > 1)
                        printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
                if (tp->chip_id == LC82C168) {
                        outl(0x00, ioaddr + CSR7);
                        mod_timer(&tp->timer, RUN_AT(HZ/50));
                } else {
                        if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
                                if (tulip_debug > 1)
                                        printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
                                outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
                                        ioaddr + CSR7);
                                outl(TimerInt, ioaddr + CSR5);
                                outl(12, ioaddr + CSR11);
                                tp->ttimer = 1;
                        }
                }
        }
#endif /* CONFIG_TULIP_NAPI */

        if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
                tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
        }

        if (tulip_debug > 4)
                printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
                       dev->name, inl(ioaddr + CSR5));

        return IRQ_HANDLED;
}