[linux-2.6.git] net/core/netpoll.c
index cb3a03f..0860e08 100644
 #define MAX_SKBS 32
 #define MAX_UDP_CHUNK 1460
 
-static spinlock_t skb_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;
 static struct sk_buff *skbs;
 
-static spinlock_t rx_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rx_list_lock);
 static LIST_HEAD(rx_list);
 
 static atomic_t trapped;
-spinlock_t netpoll_poll_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(netpoll_poll_lock);
 
 #define NETPOLL_RX_ENABLED  1
 #define NETPOLL_RX_DROP     2
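
The lock conversions above swap open-coded "spinlock_t x = SPIN_LOCK_UNLOCKED;" definitions for the DEFINE_SPINLOCK() macro, which declares and statically initializes the lock in one step (and gives later lock-debugging work a single place to hook the lock by name). A minimal sketch of the idiom, assuming nothing beyond the stock 2.6 spinlock API; my_lock and locked_work() are hypothetical:

	#include <linux/spinlock.h>

	/* One macro both declares and initializes the lock. */
	static DEFINE_SPINLOCK(my_lock);

	static void locked_work(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&my_lock, flags);
		/* ... touch whatever data my_lock protects ... */
		spin_unlock_irqrestore(&my_lock, flags);
	}
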
@@ -65,27 +65,25 @@ static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
        return csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
 }
 
-void netpoll_poll(struct netpoll *np)
+/*
+ * Check whether delayed processing was scheduled for our current CPU,
+ * and then manually invoke NAPI polling to pump data off the card.
+ *
+ * In cases where the communication is bi-directional, reading only
+ * one message at a time can lead to packets being dropped by the
+ * network adapter, forcing superfluous retries and possibly timeouts.
+ * Thus, we set our budget to a value greater than 1.
+ */
+static void poll_napi(struct netpoll *np)
 {
-       /*
-        * In cases where there is bi-directional communications, reading
-        * only one message at a time can lead to packets being dropped by
-        * the network adapter, forcing superfluous retries and possibly
-        * timeouts.  Thus, we set our budget to a more reasonable value.
-        */
        int budget = 16;
        unsigned long flags;
+       struct softnet_data *queue;
 
-       if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
-               return;
-
-       /* Process pending work on NIC */
-       np->dev->poll_controller(np->dev);
-
-       /* If scheduling is stopped, tickle NAPI bits */
        spin_lock_irqsave(&netpoll_poll_lock, flags);
-       if (np->dev->poll &&
-           test_bit(__LINK_STATE_RX_SCHED, &np->dev->state)) {
+       queue = &__get_cpu_var(softnet_data);
+       if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
+           !list_empty(&queue->poll_list)) {
                np->dev->netpoll_rx |= NETPOLL_RX_DROP;
                atomic_inc(&trapped);
 
@@ -95,6 +93,17 @@ void netpoll_poll(struct netpoll *np)
                np->dev->netpoll_rx &= ~NETPOLL_RX_DROP;
        }
        spin_unlock_irqrestore(&netpoll_poll_lock, flags);
+}
+
+void netpoll_poll(struct netpoll *np)
+{
+       if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
+               return;
+
+       /* Process pending work on NIC */
+       np->dev->poll_controller(np->dev);
+       if (np->dev->poll)
+               poll_napi(np);
 
        zap_completion_queue();
 }
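
The budget handling in poll_napi() follows the 2.6 NAPI contract: dev->poll() receives a pointer to the remaining budget, decrements both *budget and dev->quota by the number of packets it processed, and returns nonzero while work remains. A sketch of a driver-side poll method honoring that contract; mydrv_poll() and its mydrv_rx() helper are hypothetical stand-ins, not part of this patch:

	#include <linux/netdevice.h>

	/* Stand-in for the driver's receive loop: processes at most
	 * 'limit' packets off the ring and returns the count done. */
	static int mydrv_rx(struct net_device *dev, int limit);

	static int mydrv_poll(struct net_device *dev, int *budget)
	{
		int limit = min(*budget, dev->quota);
		int done = mydrv_rx(dev, limit);

		*budget -= done;
		dev->quota -= done;

		if (done < limit) {
			/* Ring drained: leave the poll list; the driver
			 * would also re-enable its receive interrupt here. */
			netif_rx_complete(dev);
			return 0;
		}
		return 1;	/* more packets pending, poll again */
	}
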
@@ -178,7 +187,7 @@ repeat:
        return skb;
 }
 
-void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
        int status;
 
@@ -565,7 +574,7 @@ int netpoll_setup(struct netpoll *np)
                goto release;
        }
 
-       if (!(ndev->flags & IFF_UP)) {
+       if (!netif_running(ndev)) {
                unsigned short oflags;
                unsigned long atmost, atleast;
 
@@ -611,7 +620,7 @@ int netpoll_setup(struct netpoll *np)
                rcu_read_lock();
                in_dev = __in_dev_get(ndev);
 
-               if (!in_dev) {
+               if (!in_dev || !in_dev->ifa_list) {
                        rcu_read_unlock();
                        printk(KERN_ERR "%s: no IP address for %s, aborting\n",
                               np->name, np->dev_name);
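
The extra !in_dev->ifa_list test is not pedantry: just past this hunk, netpoll_setup() derives its source address from the device's first IPv4 address, so a device that is up but has no address configured would previously have passed the check and oopsed on the dereference. Roughly, the code this guards continues as:

	/* Safe only because the check above rejected a NULL ifa_list. */
	np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
	rcu_read_unlock();
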
@@ -676,6 +685,5 @@ EXPORT_SYMBOL(netpoll_trap);
 EXPORT_SYMBOL(netpoll_parse_options);
 EXPORT_SYMBOL(netpoll_setup);
 EXPORT_SYMBOL(netpoll_cleanup);
-EXPORT_SYMBOL(netpoll_send_skb);
 EXPORT_SYMBOL(netpoll_send_udp);
 EXPORT_SYMBOL(netpoll_poll);
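
With netpoll_send_skb() no longer exported, the public surface is the list above; clients follow the netconsole pattern of parsing an option string, binding to a device, and then sending fire-and-forget UDP. A sketch of that usage, modeled on netconsole; the client name, option string, and addresses are illustrative only:

	#include <linux/init.h>
	#include <linux/netpoll.h>

	/* "src-port@src-ip/dev,dst-port@dst-ip/dst-mac" style string,
	 * normally supplied as a module or boot parameter. */
	static char config[] = "6665@10.0.0.1/eth0,6666@10.0.0.2/";

	static struct netpoll np = {
		.name        = "myconsole",	/* hypothetical client */
		.dev_name    = "eth0",
		.local_port  = 6665,
		.remote_port = 6666,
		.remote_mac  = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
	};

	static int __init myconsole_init(void)
	{
		if (netpoll_parse_options(&np, config))
			return -EINVAL;
		if (netpoll_setup(&np))
			return -EINVAL;
		return 0;
	}

	/* Later, from the client's output path: */
	static void myconsole_write(const char *msg, int len)
	{
		netpoll_send_udp(&np, msg, len);
	}
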