fedora core 6 1.2949 + vserver 2.2.0
[linux-2.6.git] / drivers / net / tulip / interrupt.c
index cb0e304..e3488d7 100644 (file)
@@ -1,7 +1,7 @@
 /*
        drivers/net/tulip/interrupt.c
 
-       Maintained by Jeff Garzik <jgarzik@pobox.com>
+       Maintained by Valerie Henson <val_henson@linux.intel.com>
        Copyright 2000,2001  The Linux Kernel Team
        Written/copyright 1994-2001 by Donald Becker.
 
 
 */
 
+#include <linux/pci.h>
 #include "tulip.h"
-#include <linux/config.h>
 #include <linux/etherdevice.h>
-#include <linux/pci.h>
 
 int tulip_rx_copybreak;
 unsigned int tulip_max_interrupt_work;
@@ -26,7 +25,7 @@ unsigned int tulip_max_interrupt_work;
 #define MIT_SIZE 15
 #define MIT_TABLE 15 /* We use 0 or max */
 
-unsigned int mit_table[MIT_SIZE+1] =
+static unsigned int mit_table[MIT_SIZE+1] =
 {
         /*  CRS11 21143 hardware Mitigation Control Interrupt
             We use only RX mitigation we other techniques for
@@ -78,7 +77,7 @@ int tulip_refill_rx(struct net_device *dev)
                        if (skb == NULL)
                                break;
 
-                       mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
+                       mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
                                                 PCI_DMA_FROMDEVICE);
                        tp->rx_buffers[entry].mapping = mapping;
 
@@ -89,11 +88,11 @@ int tulip_refill_rx(struct net_device *dev)
                tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
        }
        if(tp->chip_id == LC82C168) {
-               if(((inl(dev->base_addr + CSR5)>>17)&0x07) == 4) {
+               if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
                        /* Rx stopped due to out of buffers,
                         * restart it
                         */
-                       outl(0x01, dev->base_addr + CSR2);
+                       iowrite32(0x01, tp->base_addr + CSR2);
                }
        }
        return refilled;
@@ -133,24 +132,28 @@ int tulip_poll(struct net_device *dev, int *budget)
                           tp->rx_ring[entry].status);
 
        do {
+               if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
+                       printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
+                       break;
+               }
                /* Acknowledge current RX interrupt sources. */
-               outl((RxIntr | RxNoBuf), dev->base_addr + CSR5);
+               iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
+
+
                /* If we own the next entry, it is a new packet. Send it up. */
                while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                        s32 status = le32_to_cpu(tp->rx_ring[entry].status);
+
+
                        if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                                break;
+
                        if (tulip_debug > 5)
                                printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                       dev->name, entry, status);
                        if (--rx_work_limit < 0)
                                goto not_done;
+
                        if ((status & 0x38008300) != 0x0300) {
                                if ((status & 0x38000300) != 0x0300) {
                                 /* Ingore earlier buffers. */
@@ -176,7 +179,7 @@ int tulip_poll(struct net_device *dev, int *budget)
                                /* Omit the four octet CRC from the length. */
                                short pkt_len = ((status >> 16) & 0x7ff) - 4;
                                struct sk_buff *skb;
-  
+
 #ifndef final_version
                                if (pkt_len > 1518) {
                                        printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
@@ -195,12 +198,12 @@ int tulip_poll(struct net_device *dev, int *budget)
                                                                   tp->rx_buffers[entry].mapping,
                                                                   pkt_len, PCI_DMA_FROMDEVICE);
 #if ! defined(__alpha__)
-                                       eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
+                                       eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
                                                         pkt_len, 0);
                                        skb_put(skb, pkt_len);
 #else
                                        memcpy(skb_put(skb, pkt_len),
-                                              tp->rx_buffers[entry].skb->tail,
+                                              tp->rx_buffers[entry].skb->data,
                                               pkt_len);
 #endif
                                        pci_dma_sync_single_for_device(tp->pdev,
@@ -209,7 +212,7 @@ int tulip_poll(struct net_device *dev, int *budget)
                                } else {        /* Pass up the skb already on the Rx ring. */
                                        char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                             pkt_len);
-  
+
 #ifndef final_version
                                        if (tp->rx_buffers[entry].mapping !=
                                            le32_to_cpu(tp->rx_ring[entry].buffer1)) {
@@ -221,17 +224,17 @@ int tulip_poll(struct net_device *dev, int *budget)
                                                       skb->head, temp);
                                        }
 #endif
-  
+
                                        pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                         PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-  
+
                                        tp->rx_buffers[entry].skb = NULL;
                                        tp->rx_buffers[entry].mapping = 0;
                                }
                                skb->protocol = eth_type_trans(skb, dev);
-  
+
                                netif_receive_skb(skb);
+
                                dev->last_rx = jiffies;
                                tp->stats.rx_packets++;
                                tp->stats.rx_bytes += pkt_len;
@@ -241,12 +244,12 @@ int tulip_poll(struct net_device *dev, int *budget)
                        entry = (++tp->cur_rx) % RX_RING_SIZE;
                        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                                tulip_refill_rx(dev);
+
                 }
+
                /* New ack strategy... irq does not ack Rx any longer
                   hopefully this helps */
+
                /* Really bad things can happen here... If new packet arrives
                 * and an irq arrives (tx or just due to occasionally unset
                 * mask), it will be acked by irq handler, but new thread
@@ -254,59 +257,59 @@ int tulip_poll(struct net_device *dev, int *budget)
                 * No idea how to fix this if "playing with fire" will fail
                 * tomorrow (night 011029). If it will not fail, we won
                 * finally: amount of IO did not increase at all. */
-       } while ((inl(dev->base_addr + CSR5) & RxIntr));
+       } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
+
 done:
+
  #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
-  
+
           /* We use this simplistic scheme for IM. It's proven by
              real life installations. We can have IM enabled
-            continuesly but this would cause unnecessary latency. 
-            Unfortunely we can't use all the NET_RX_* feedback here. 
-            This would turn on IM for devices that is not contributing 
-            to backlog congestion with unnecessary latency. 
-  
+            continuously but this would cause unnecessary latency.
+            Unfortunately we can't use all the NET_RX_* feedback here.
+            This would turn on IM for devices that are not contributing
+            to backlog congestion with unnecessary latency.
+
              We monitor the the device RX-ring and have:
-  
+
              HW Interrupt Mitigation either ON or OFF.
-  
-            ON:  More then 1 pkt received (per intr.) OR we are dropping 
+
+            ON:  More than 1 pkt received (per intr.) OR we are dropping
              OFF: Only 1 pkt received
-            
+
              Note. We only use min and max (0, 15) settings from mit_table */
-  
-  
+
+
           if( tp->flags &  HAS_INTR_MITIGATION) {
                  if( received > 1 ) {
                          if( ! tp->mit_on ) {
                                  tp->mit_on = 1;
-                                 outl(mit_table[MIT_TABLE], dev->base_addr + CSR11);
+                                 iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
                          }
                   }
                  else {
                          if( tp->mit_on ) {
                                  tp->mit_on = 0;
-                                 outl(0, dev->base_addr + CSR11);
+                                 iowrite32(0, tp->base_addr + CSR11);
                          }
                   }
           }
 
 #endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
+
          dev->quota -= received;
          *budget -= received;
+
          tulip_refill_rx(dev);
-         
+
          /* If RX ring is not full we are out of memory. */
          if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
+
          /* Remove us from polling list and enable RX intr. */
+
          netif_rx_complete(dev);
-         outl(tulip_tbl[tp->chip_id].valid_intrs, dev->base_addr+CSR7);
+         iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
+
          /* The last op happens after poll completion. Which means the following:
           * 1. it can race with disabling irqs in irq handler
           * 2. it can race with dise/enabling irqs in other poll threads
@@ -317,9 +320,9 @@ done:
           * due to races in masking and due to too late acking of already
           * processed irqs. But it must not result in losing events.
           */
+
          return 0;
+
  not_done:
          if (!received) {
 
@@ -327,29 +330,29 @@ done:
          }
          dev->quota -= received;
          *budget -= received;
+
          if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
              tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                  tulip_refill_rx(dev);
+
          if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
+
          return 1;
+
+
  oom:    /* Executed with RX ints disabled */
-         
+
+
          /* Start timer, stop polling, but do not enable rx interrupts. */
          mod_timer(&tp->oom_timer, jiffies+1);
-       
+
          /* Think: timer_pending() was an explicit signature of bug.
           * Timer can be pending now but fired and completed
           * before we did netif_rx_complete(). See? We would lose it. */
+
          /* remove ourselves from the polling list */
          netif_rx_complete(dev);
+
          return 0;
 }
 
@@ -419,12 +422,12 @@ static int tulip_rx(struct net_device *dev)
                                                            tp->rx_buffers[entry].mapping,
                                                            pkt_len, PCI_DMA_FROMDEVICE);
 #if ! defined(__alpha__)
-                               eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
+                               eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
                                                 pkt_len, 0);
                                skb_put(skb, pkt_len);
 #else
                                memcpy(skb_put(skb, pkt_len),
-                                      tp->rx_buffers[entry].skb->tail,
+                                      tp->rx_buffers[entry].skb->data,
                                       pkt_len);
 #endif
                                pci_dma_sync_single_for_device(tp->pdev,
@@ -470,19 +473,19 @@ static int tulip_rx(struct net_device *dev)
 static inline unsigned int phy_interrupt (struct net_device *dev)
 {
 #ifdef __hppa__
-       int csr12 = inl(dev->base_addr + CSR12) & 0xff;
        struct tulip_private *tp = netdev_priv(dev);
+       int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;
 
        if (csr12 != tp->csr12_shadow) {
                /* ack interrupt */
-               outl(csr12 | 0x02, dev->base_addr + CSR12);
+               iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
                tp->csr12_shadow = csr12;
                /* do link change stuff */
                spin_lock(&tp->lock);
                tulip_check_duplex(dev);
                spin_unlock(&tp->lock);
                /* clear irq ack bit */
-               outl(csr12 & ~0x02, dev->base_addr + CSR12);
+               iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);
 
                return 1;
        }
@@ -493,11 +496,11 @@ static inline unsigned int phy_interrupt (struct net_device *dev)
 
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
-irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+irqreturn_t tulip_interrupt(int irq, void *dev_instance)
 {
        struct net_device *dev = (struct net_device *)dev_instance;
        struct tulip_private *tp = netdev_priv(dev);
-       long ioaddr = dev->base_addr;
+       void __iomem *ioaddr = tp->base_addr;
        int csr5;
        int missed;
        int rx = 0;
@@ -515,11 +518,11 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
        unsigned int handled = 0;
 
        /* Let's see whether the interrupt really is for us */
-       csr5 = inl(ioaddr + CSR5);
+       csr5 = ioread32(ioaddr + CSR5);
 
-        if (tp->flags & HAS_PHY_IRQ) 
+        if (tp->flags & HAS_PHY_IRQ)
                handled = phy_interrupt (dev);
-    
+
        if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
                return IRQ_RETVAL(handled);
 
@@ -532,21 +535,21 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
                if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
                        rxd++;
                        /* Mask RX intrs and add the device to poll list. */
-                       outl(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
+                       iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
                        netif_rx_schedule(dev);
-                       
+
                        if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
                                break;
                }
-               
+
                /* Acknowledge the interrupt sources we handle here ASAP
                   the poll function does Rx and RxNoBuf acking */
-               
-               outl(csr5 & 0x0001ff3f, ioaddr + CSR5);
 
-#else 
+               iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
+
+#else
                /* Acknowledge all of the current interrupt sources ASAP. */
-               outl(csr5 & 0x0001ffff, ioaddr + CSR5);
+               iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);
 
 
                if (csr5 & (RxIntr | RxNoBuf)) {
@@ -555,11 +558,11 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
                }
 
 #endif /*  CONFIG_TULIP_NAPI */
-               
+
                if (tulip_debug > 4)
                        printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
-                              dev->name, csr5, inl(dev->base_addr + CSR5));
-               
+                              dev->name, csr5, ioread32(ioaddr + CSR5));
+
 
                if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
                        unsigned int dirty_tx;
@@ -633,7 +636,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
                                if (tulip_debug > 2)
                                        printk(KERN_WARNING "%s: The transmitter stopped."
                                                   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
-                                                  dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
+                                                  dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
                                tulip_restart_rxtx(tp);
                        }
                        spin_unlock(&tp->lock);
@@ -651,16 +654,16 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
                                        tp->csr6 |= 0x00200000;  /* Store-n-forward. */
                                /* Restart the transmit process. */
                                tulip_restart_rxtx(tp);
-                               outl(0, ioaddr + CSR1);
+                               iowrite32(0, ioaddr + CSR1);
                        }
                        if (csr5 & (RxDied | RxNoBuf)) {
                                if (tp->flags & COMET_MAC_ADDR) {
-                                       outl(tp->mc_filter[0], ioaddr + 0xAC);
-                                       outl(tp->mc_filter[1], ioaddr + 0xB0);
+                                       iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
+                                       iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
                                }
                        }
                        if (csr5 & RxDied) {            /* Missed a Rx frame. */
-                                tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+                                tp->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
                                tp->stats.rx_errors++;
                                tulip_start_rxtx(tp);
                        }
@@ -688,7 +691,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
                                        dev->name, tp->nir, error);
                        }
                        /* Clear all error sources, included undocumented ones! */
-                       outl(0x0800f7ba, ioaddr + CSR5);
+                       iowrite32(0x0800f7ba, ioaddr + CSR5);
                        oi++;
                }
                if (csr5 & TimerInt) {
@@ -696,7 +699,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
                        if (tulip_debug > 2)
                                printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
                                           dev->name, csr5);
-                       outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+                       iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
                        tp->ttimer = 0;
                        oi++;
                }
@@ -706,20 +709,20 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
                                           "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);
 
                        /* Acknowledge all interrupt sources. */
-                        outl(0x8001ffff, ioaddr + CSR5);
+                        iowrite32(0x8001ffff, ioaddr + CSR5);
                         if (tp->flags & HAS_INTR_MITIGATION) {
                      /* Josip Loncaric at ICASE did extensive experimentation
                        to develop a good interrupt mitigation setting.*/
-                                outl(0x8b240000, ioaddr + CSR11);
+                                iowrite32(0x8b240000, ioaddr + CSR11);
                         } else if (tp->chip_id == LC82C168) {
                                /* the LC82C168 doesn't have a hw timer.*/
-                               outl(0x00, ioaddr + CSR7);
+                               iowrite32(0x00, ioaddr + CSR7);
                                mod_timer(&tp->timer, RUN_AT(HZ/50));
                        } else {
                           /* Mask all interrupting sources, set timer to
                                re-enable. */
-                                outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
-                                outl(0x0012, ioaddr + CSR11);
+                                iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
+                                iowrite32(0x0012, ioaddr + CSR11);
                         }
                        break;
                }
@@ -728,22 +731,22 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
                if (work_count == 0)
                        break;
 
-               csr5 = inl(ioaddr + CSR5);
+               csr5 = ioread32(ioaddr + CSR5);
 
 #ifdef CONFIG_TULIP_NAPI
                if (rxd)
                        csr5 &= ~RxPollInt;
-       } while ((csr5 & (TxNoBuf | 
-                         TxDied | 
-                         TxIntr | 
+       } while ((csr5 & (TxNoBuf |
+                         TxDied |
+                         TxIntr |
                          TimerInt |
                          /* Abnormal intr. */
-                         RxDied | 
-                         TxFIFOUnderflow | 
-                         TxJabber | 
-                         TPLnkFail |  
+                         RxDied |
+                         TxFIFOUnderflow |
+                         TxJabber |
+                         TPLnkFail |
                          SytemError )) != 0);
-#else 
+#else
        } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
 
        tulip_refill_rx(dev);
@@ -754,29 +757,29 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
                if (tulip_debug > 1)
                        printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
                if (tp->chip_id == LC82C168) {
-                       outl(0x00, ioaddr + CSR7);
+                       iowrite32(0x00, ioaddr + CSR7);
                        mod_timer(&tp->timer, RUN_AT(HZ/50));
                } else {
-                       if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
+                       if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
                                if (tulip_debug > 1)
                                        printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
-                               outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
+                               iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
                                        ioaddr + CSR7);
-                               outl(TimerInt, ioaddr + CSR5);
-                               outl(12, ioaddr + CSR11);
+                               iowrite32(TimerInt, ioaddr + CSR5);
+                               iowrite32(12, ioaddr + CSR11);
                                tp->ttimer = 1;
                        }
                }
        }
 #endif /* CONFIG_TULIP_NAPI */
 
-       if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
+       if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
                tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
        }
 
        if (tulip_debug > 4)
                printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
-                          dev->name, inl(ioaddr + CSR5));
+                          dev->name, ioread32(ioaddr + CSR5));
 
        return IRQ_HANDLED;
 }