This commit was manufactured by cvs2svn to create branch
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index daac168..1ac9932 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -18,6 +18,7 @@
 #include <linux/interrupt.h>
 #include <linux/netpoll.h>
 #include <linux/sched.h>
+#include <linux/nmi.h>
 #include <net/tcp.h>
 #include <net/udp.h>
 
@@ -29,6 +30,9 @@
 #define MAX_SKBS 32
 #define MAX_UDP_CHUNK 1460
 
+#define NETPOLL_RX_ENABLED  1
+#define NETPOLL_RX_DROP     2
+
 static spinlock_t skb_list_lock = SPIN_LOCK_UNLOCKED;
 static int nr_skbs;
 static struct sk_buff *skbs;
@@ -38,6 +42,8 @@ static LIST_HEAD(rx_list);
 
 static int trapped;
 
+extern void (*netdump_func) (struct pt_regs *regs);
+
 #define MAX_SKB_SIZE \
                (MAX_UDP_CHUNK + sizeof(struct udphdr) + \
                                sizeof(struct iphdr) + sizeof(struct ethhdr))
@@ -61,7 +67,7 @@ static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
 
 void netpoll_poll(struct netpoll *np)
 {
-       int budget = 1;
+       int budget = netdump_mode ? 64 : 16;
 
        if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
                return;
@@ -70,9 +76,19 @@ void netpoll_poll(struct netpoll *np)
        np->dev->poll_controller(np->dev);
 
        /* If scheduling is stopped, tickle NAPI bits */
-       if(trapped && np->dev->poll &&
-          test_bit(__LINK_STATE_RX_SCHED, &np->dev->state))
-               np->dev->poll(np->dev, &budget);
+       if (np->dev->poll && 
+           test_bit(__LINK_STATE_RX_SCHED, &np->dev->state)) {
+               np->dev->netpoll_rx |= NETPOLL_RX_DROP;
+               if (trapped) {
+                       np->dev->poll(np->dev, &budget);
+               } else {
+                       trapped = 1;
+                       np->dev->poll(np->dev, &budget);
+                       trapped = 0;
+               }
+               np->dev->netpoll_rx &= ~NETPOLL_RX_DROP;
+       }
+
        zap_completion_queue();
 }
 
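
With this change netpoll_poll() no longer depends on the trap already being set: whenever the device has a NAPI ->poll() scheduled, the poll is run with trapped forced to 1, and NETPOLL_RX_DROP is set on the device for the duration so that the receive path (see the netpoll_rx() hunk below) treats the device as owned by netpoll rather than feeding packets to a stack that may never run again. The larger budget in netdump mode lets a crash dump drain more work per call; note that netdump_mode is not declared anywhere in this file, so it is assumed to come from a companion change elsewhere in the same patch series. The driver-side requirement is unchanged: the device must provide a ->poll_controller() that can make the NIC process completions without interrupt delivery. A typical implementation of that era, shown only for illustration (the mynic_* names are hypothetical), follows the mask-IRQ/run-handler/unmask pattern:

/*
 * Illustrative ->poll_controller() sketch; not part of this patch.
 * Real drivers of the period (e.g. 8139too, e100) follow the same
 * shape: mask the NIC's interrupt, run the normal interrupt handler
 * by hand, then unmask again.
 */
static void mynic_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        mynic_interrupt(dev->irq, dev, NULL);   /* 2.6-era handler signature */
        enable_irq(dev->irq);
}
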
@@ -115,6 +131,7 @@ static void zap_completion_queue(void)
        }
 
        put_cpu_var(softnet_data);
+       touch_nmi_watchdog();
 }
 
 static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve)
@@ -333,6 +350,9 @@ int netpoll_rx(struct sk_buff *skb)
        struct list_head *p;
        unsigned long flags;
 
+       if (!(skb->dev->netpoll_rx & NETPOLL_RX_ENABLED))
+               return 1;
+
        if (skb->dev->type != ARPHRD_ETHER)
                goto out;
 
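
netpoll_rx() is now gated by a per-device flag instead of CONFIG_NETPOLL_RX: unless NETPOLL_RX_ENABLED was set at setup time (i.e. the client registered an rx_hook), the packet is claimed immediately. Because netpoll_poll() sets NETPOLL_RX_DROP around the forced ->poll(), dev->netpoll_rx stays non-zero while polling, so traffic arriving on a device without an rx_hook is discarded rather than queued for a scheduler that may be stopped. This relies on the call site in net/core/dev.c, which is not part of this hunk; in kernels of that vintage it looks roughly like the following (shown only as an assumption, for context):

/* Assumed netif_rx()/netif_receive_skb() call site, not in this patch:
 * the skb is offered to netpoll only when dev->netpoll_rx is non-zero,
 * and dropped when netpoll claims it. */
if (skb->dev->netpoll_rx && netpoll_rx(skb)) {
        kfree_skb(skb);
        return NET_RX_DROP;
}
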
@@ -591,15 +611,16 @@ int netpoll_setup(struct netpoll *np)
        if(np->rx_hook) {
                unsigned long flags;
 
-#ifdef CONFIG_NETPOLL_RX
-               np->dev->netpoll_rx = 1;
-#endif
+               np->dev->netpoll_rx = NETPOLL_RX_ENABLED;
 
                spin_lock_irqsave(&rx_list_lock, flags);
                list_add(&np->rx_list, &rx_list);
                spin_unlock_irqrestore(&rx_list_lock, flags);
        }
 
+       if(np->dump_func)
+               netdump_func = np->dump_func;
+
        return 0;
  release:
        dev_put(ndev);
@@ -613,9 +634,7 @@ void netpoll_cleanup(struct netpoll *np)
 
                spin_lock_irqsave(&rx_list_lock, flags);
                list_del(&np->rx_list);
-#ifdef CONFIG_NETPOLL_RX
                np->dev->netpoll_rx = 0;
-#endif
                spin_unlock_irqrestore(&rx_list_lock, flags);
        }
 
@@ -633,6 +652,13 @@ void netpoll_set_trap(int trap)
        trapped = trap;
 }
 
+void netpoll_reset_locks(struct netpoll *np)
+{
+       spin_lock_init(&rx_list_lock);
+       spin_lock_init(&skb_list_lock);
+       spin_lock_init(&np->dev->xmit_lock);
+}
+
 EXPORT_SYMBOL(netpoll_set_trap);
 EXPORT_SYMBOL(netpoll_trap);
 EXPORT_SYMBOL(netpoll_parse_options);
@@ -641,3 +667,4 @@ EXPORT_SYMBOL(netpoll_cleanup);
 EXPORT_SYMBOL(netpoll_send_skb);
 EXPORT_SYMBOL(netpoll_send_udp);
 EXPORT_SYMBOL(netpoll_poll);
+EXPORT_SYMBOL_GPL(netpoll_reset_locks);
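
Taken together, the new pieces target a netdump-style client: netpoll_setup() records the client's dump_func in the global netdump_func pointer (presumably invoked from the arch crash path by a companion patch), netpoll_reset_locks() lets the crashing CPU reinitialise locks that may still be held by the interrupted context, and netpoll_set_trap() plus netpoll_poll() drive the NIC by polling alone. The sketch below is illustrative only: the nd_* names are hypothetical, the dump_func member of struct netpoll is assumed to be added by the companion netpoll.h change, and a real client would stream all of memory and wait for acknowledgements through its rx_hook.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/string.h>

static void nd_dump(struct pt_regs *regs);
static void nd_rx(struct netpoll *np, int port, char *msg, int len);

static struct netpoll nd_np = {
        .name        = "netdump",
        .dev_name    = "eth0",                  /* illustrative */
        .local_port  = 6665,
        .remote_port = 6666,
        .remote_ip   = 0x0a000001,              /* 10.0.0.1, illustrative */
        .remote_mac  = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
        .rx_hook     = nd_rx,
        .dump_func   = nd_dump,                 /* assumed struct netpoll member */
};

/* Handshake/ack packets from the dump server arrive here while trapped. */
static void nd_rx(struct netpoll *np, int port, char *msg, int len)
{
        /* parse acknowledgement, advance the dump, ... */
}

/* Reached through the netdump_func pointer on the crashed CPU. */
static void nd_dump(struct pt_regs *regs)
{
        static char buf[1024];

        netpoll_reset_locks(&nd_np);    /* locks may be held by the dead context */
        netpoll_set_trap(1);            /* divert all RX into netpoll */

        memset(buf, 0, sizeof(buf));    /* a real client would copy memory here */
        netpoll_send_udp(&nd_np, buf, sizeof(buf));
        netpoll_poll(&nd_np);           /* kick TX/RX without interrupt delivery */

        netpoll_set_trap(0);
}

static int __init nd_init(void)
{
        return netpoll_setup(&nd_np);   /* registers rx_hook and installs dump_func */
}
module_init(nd_init);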