fedora core 6 1.2949 + vserver 2.2.0
[linux-2.6.git] / net / core / dev.c
index eb965a7..81a3ae2 100644
@@ -76,7 +76,6 @@
 #include <asm/system.h>
 #include <linux/bitops.h>
 #include <linux/capability.h>
-#include <linux/config.h>
 #include <linux/cpu.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -99,7 +98,6 @@
 #include <linux/seq_file.h>
 #include <linux/stat.h>
 #include <linux/if_bridge.h>
-#include <linux/divert.h>
 #include <net/dst.h>
 #include <net/pkt_sched.h>
 #include <net/checksum.h>
 #include <net/iw_handler.h>
 #include <asm/current.h>
 #include <linux/audit.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/vs_context.h> /* remove with NXF_HIDE_NETIF */
 #include <linux/vs_network.h>
 
+#ifdef CONFIG_XEN
+#include <net/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#endif
+
 /*
  *     The list of packet types we will receive (as opposed to discard)
  *     and the routines to invoke.
@@ -149,6 +157,12 @@ static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[16];        /* 16 way hashed list */
 static struct list_head ptype_all;             /* Taps */
 
+#ifdef CONFIG_NET_DMA
+static struct dma_client *net_dma_client;
+static unsigned int net_dma_count;
+static spinlock_t net_dma_event_lock;
+#endif
+
 /*
  * The @dev_base list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -223,7 +237,7 @@ extern void netdev_unregister_sysfs(struct net_device *);
  *     For efficiency
  */
 
-int netdev_nit;
+static int netdev_nit;
 
 /*
  *     Add a protocol ID to the list. Now that the input handler is
@@ -626,14 +640,24 @@ struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mas
  *     @name: name string
  *
  *     Network device names need to be valid file names to
- *     to allow sysfs to work
+ *     to allow sysfs to work.  We also disallow any kind of
+ *     whitespace.
  */
 int dev_valid_name(const char *name)
 {
-       return !(*name == '\0' 
-                || !strcmp(name, ".")
-                || !strcmp(name, "..")
-                || strchr(name, '/'));
+       if (*name == '\0')
+               return 0;
+       if (strlen(name) >= IFNAMSIZ)
+               return 0;
+       if (!strcmp(name, ".") || !strcmp(name, ".."))
+               return 0;
+
+       while (*name) {
+               if (*name == '/' || isspace(*name))
+                       return 0;
+               name++;
+       }
+       return 1;
 }
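
/*
 * Illustrative only, not part of the patch: with the new checks the
 * function behaves roughly like this (example names are made up):
 *
 *	dev_valid_name("eth0")                -> 1
 *	dev_valid_name("")                    -> 0  empty name
 *	dev_valid_name("..")                  -> 0  would collide with sysfs entries
 *	dev_valid_name("a/b")                 -> 0  '/' is not allowed in a file name
 *	dev_valid_name("my dev")              -> 0  whitespace is now rejected
 *	dev_valid_name("name_longer_than_15") -> 0  strlen() >= IFNAMSIZ
 */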
 
 /**
@@ -1042,7 +1066,7 @@ static inline void net_timestamp(struct sk_buff *skb)
  *     taps currently in use.
  */
 
-void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
+static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 {
        struct packet_type *ptype;
 
@@ -1151,14 +1175,17 @@ EXPORT_SYMBOL(netif_device_attach);
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
  */
-int skb_checksum_help(struct sk_buff *skb, int inward)
+int skb_checksum_help(struct sk_buff *skb)
 {
-       unsigned int csum;
+       __wsum csum;
        int ret = 0, offset = skb->h.raw - skb->data;
 
-       if (inward) {
-               skb->ip_summed = CHECKSUM_NONE;
-               goto out;
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               goto out_set_summed;
+
+       if (unlikely(skb_shinfo(skb)->gso_size)) {
+               /* Let GSO fix up the checksum. */
+               goto out_set_summed;
        }
 
        if (skb_cloned(skb)) {
@@ -1172,14 +1199,68 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
 
        offset = skb->tail - skb->h.raw;
        BUG_ON(offset <= 0);
-       BUG_ON(skb->csum + 2 > offset);
+       BUG_ON(skb->csum_offset + 2 > offset);
 
-       *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
+       *(__sum16*)(skb->h.raw + skb->csum_offset) = csum_fold(csum);
+
+out_set_summed:
        skb->ip_summed = CHECKSUM_NONE;
 out:   
        return ret;
 }
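
/*
 * A minimal sketch, not part of the patch: a sender that defers the
 * checksum (CHECKSUM_PARTIAL) is expected to leave the skb roughly in
 * this state before the packet reaches the driver or the helper above:
 *
 *	skb->ip_summed   = CHECKSUM_PARTIAL;
 *	skb->h.raw       = start of the area to be checksummed;
 *	skb->csum_offset = offsetof(struct tcphdr, check);   (TCP, for example)
 *
 * skb_checksum_help() then checksums everything from skb->h.raw to the
 * end of the packet and folds the result into the 16-bit field at
 * skb->h.raw + skb->csum_offset, which is what the BUG_ON() and
 * csum_fold() lines above rely on.
 */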
 
+/**
+ *     skb_gso_segment - Perform segmentation on skb.
+ *     @skb: buffer to segment
+ *     @features: features for the output path (see dev->features)
+ *
+ *     This function segments the given skb and returns a list of segments.
+ *
+ *     It may return NULL if the skb requires no segmentation.  This is
+ *     only possible when GSO is used for verifying header integrity.
+ */
+struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
+{
+       struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
+       struct packet_type *ptype;
+       __be16 type = skb->protocol;
+       int err;
+
+       BUG_ON(skb_shinfo(skb)->frag_list);
+
+       skb->mac.raw = skb->data;
+       skb->mac_len = skb->nh.raw - skb->data;
+       __skb_pull(skb, skb->mac_len);
+
+       if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+               if (skb_header_cloned(skb) &&
+                   (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+                       return ERR_PTR(err);
+       }
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
+               if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
+                       if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+                               err = ptype->gso_send_check(skb);
+                               segs = ERR_PTR(err);
+                               if (err || skb_gso_ok(skb, features))
+                                       break;
+                               __skb_push(skb, skb->data - skb->nh.raw);
+                       }
+                       segs = ptype->gso_segment(skb, features);
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       __skb_push(skb, skb->data - skb->mac.raw);
+
+       return segs;
+}
+
+EXPORT_SYMBOL(skb_gso_segment);
+
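/*
 * Hypothetical caller sketch, not taken from the patch: the three
 * outcomes documented above are NULL (headers verified, nothing to
 * segment), an ERR_PTR() value, or a list of segments chained through
 * skb->next:
 *
 *	struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	if (segs == NULL)
 *		return 0;		(GSO only verified the headers)
 *	...walk the ->next chain and hand each segment to the driver
 *
 * dev_gso_segment() below does essentially this on the transmit path.
 */
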
 /* Take action when hardware reception checksum errors are detected. */
 #ifdef CONFIG_BUG
 void netdev_rx_csum_fault(struct net_device *dev)
@@ -1193,7 +1274,6 @@ void netdev_rx_csum_fault(struct net_device *dev)
 EXPORT_SYMBOL(netdev_rx_csum_fault);
 #endif
 
-#ifdef CONFIG_HIGHMEM
 /* Actually, we should eliminate this check as soon as we know, that:
  * 1. IOMMU is present and allows to map all the memory.
  * 2. No high memory really exists on this machine.
@@ -1201,6 +1281,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
 
 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
+#ifdef CONFIG_HIGHMEM
        int i;
 
        if (dev->features & NETIF_F_HIGHDMA)
@@ -1210,84 +1291,152 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
                if (PageHighMem(skb_shinfo(skb)->frags[i].page))
                        return 1;
 
+#endif
        return 0;
 }
-#else
-#define illegal_highdma(dev, skb)      (0)
-#endif
 
-/* Keep head the same: replace data */
-int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
+struct dev_gso_cb {
+       void (*destructor)(struct sk_buff *skb);
+};
+
+#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
+
+static void dev_gso_skb_destructor(struct sk_buff *skb)
 {
-       unsigned int size;
-       u8 *data;
-       long offset;
-       struct skb_shared_info *ninfo;
-       int headerlen = skb->data - skb->head;
-       int expand = (skb->tail + skb->data_len) - skb->end;
+       struct dev_gso_cb *cb;
 
 
-               BUG();
+       do {
+               struct sk_buff *nskb = skb->next;
 
 
-               expand = 0;
+               skb->next = nskb->next;
+               nskb->next = NULL;
+               kfree_skb(nskb);
+       } while (skb->next);
 
 
-       size = SKB_DATA_ALIGN(size);
-       data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
-       if (!data)
-               return -ENOMEM;
+       cb = DEV_GSO_CB(skb);
+       if (cb->destructor)
+               cb->destructor(skb);
+}
 
 
-       if (skb_copy_bits(skb, -headerlen, data, headerlen + skb->len))
-               BUG();
+/**
+ *     dev_gso_segment - Perform emulated hardware segmentation on skb.
+ *     @skb: buffer to segment
+ *
+ *     This function segments the given skb and stores the list of segments
+ *     in skb->next.
+ */
+static int dev_gso_segment(struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       struct sk_buff *segs;
+       int features = dev->features & ~(illegal_highdma(dev, skb) ?
+                                        NETIF_F_SG : 0);
 
 
-       ninfo = (struct skb_shared_info*)(data + size);
-       atomic_set(&ninfo->dataref, 1);
-       ninfo->tso_size = skb_shinfo(skb)->tso_size;
-       ninfo->tso_segs = skb_shinfo(skb)->tso_segs;
-       ninfo->nr_frags = 0;
-       ninfo->frag_list = NULL;
+       segs = skb_gso_segment(skb, features);
+
+       /* Verifying header integrity only. */
+       if (!segs)
+               return 0;
+
+       if (unlikely(IS_ERR(segs)))
+               return PTR_ERR(segs);
+
+       skb->next = segs;
+       DEV_GSO_CB(skb)->destructor = skb->destructor;
+       skb->destructor = dev_gso_skb_destructor;
+
+       return 0;
+}
 
 
-       offset = data - skb->head;
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       if (likely(!skb->next)) {
+               if (netdev_nit)
+                       dev_queue_xmit_nit(skb, dev);
 
 
-       skb_release_data(skb);
+               if (netif_needs_gso(dev, skb)) {
+                       if (unlikely(dev_gso_segment(skb)))
+                               goto out_kfree_skb;
+                       if (skb->next)
+                               goto gso;
+               }
 
 
-       skb->end  = data + size;
+               return dev->hard_start_xmit(skb, dev);
+       }
 
 
-       skb->h.raw   += offset;
-       skb->nh.raw  += offset;
-       skb->mac.raw += offset;
-       skb->tail    += offset;
-       skb->data    += offset;
+gso:
+       do {
+               struct sk_buff *nskb = skb->next;
+               int rc;
 
 
-       skb->cloned    = 0;
+               skb->next = nskb->next;
+               nskb->next = NULL;
+               rc = dev->hard_start_xmit(nskb, dev);
+               if (unlikely(rc)) {
+                       nskb->next = skb->next;
+                       skb->next = nskb;
+                       return rc;
+               }
+               if (unlikely(netif_queue_stopped(dev) && skb->next))
+                       return NETDEV_TX_BUSY;
+       } while (skb->next);
+       
+       skb->destructor = DEV_GSO_CB(skb)->destructor;
 
-       skb->tail     += skb->data_len;
-       skb->data_len  = 0;
+out_kfree_skb:
+       kfree_skb(skb);
        return 0;
 }
 
 #define HARD_TX_LOCK(dev, cpu) {                       \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
-               spin_lock(&dev->xmit_lock);             \
-               dev->xmit_lock_owner = cpu;             \
+               netif_tx_lock(dev);                     \
        }                                               \
 }
 
 #define HARD_TX_UNLOCK(dev) {                          \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
-               dev->xmit_lock_owner = -1;              \
-               spin_unlock(&dev->xmit_lock);           \
+               netif_tx_unlock(dev);                   \
        }                                               \
 }
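
/*
 * For reference, and assuming the matching <linux/netdevice.h> change
 * from this kernel series: netif_tx_lock()/netif_tx_unlock() wrap what
 * the two macros above used to open-code, roughly:
 *
 *	static inline void netif_tx_lock(struct net_device *dev)
 *	{
 *		spin_lock(&dev->_xmit_lock);
 *		dev->xmit_lock_owner = smp_processor_id();
 *	}
 *
 *	static inline void netif_tx_unlock(struct net_device *dev)
 *	{
 *		dev->xmit_lock_owner = -1;
 *		spin_unlock(&dev->_xmit_lock);
 *	}
 *
 * (dev->_xmit_lock is the renamed xmit_lock; see the register_netdevice()
 * hunk further down.)
 */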
 
+#ifdef CONFIG_XEN
+inline int skb_checksum_setup(struct sk_buff *skb)
+{
+       if (skb->proto_csum_blank) {
+               if (skb->protocol != htons(ETH_P_IP))
+                       goto out;
+               skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
+               if (skb->h.raw >= skb->tail)
+                       goto out;
+               switch (skb->nh.iph->protocol) {
+               case IPPROTO_TCP:
+                       skb->csum = offsetof(struct tcphdr, check);
+                       break;
+               case IPPROTO_UDP:
+                       skb->csum = offsetof(struct udphdr, check);
+                       break;
+               default:
+                       if (net_ratelimit())
+                               printk(KERN_ERR "Attempting to checksum a non-"
+                                      "TCP/UDP packet, dropping a protocol"
+                                      " %d packet", skb->nh.iph->protocol);
+                       goto out;
+               }
+               if ((skb->h.raw + skb->csum + 2) > skb->tail)
+                       goto out;
+               skb->ip_summed = CHECKSUM_PARTIAL;
+               skb->proto_csum_blank = 0;
+       }
+       return 0;
+out:
+       return -EPROTO;
+}
+#else
+inline int skb_checksum_setup(struct sk_buff *skb) { return 0; }
+#endif
+
+
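/*
 * Background note, not in the original patch text: proto_csum_blank is
 * presumably set by the Xen netfront/netback drivers for packets that
 * crossed a domain boundary with their transport checksum still unset.
 * skb_checksum_setup() rebuilds skb->h.raw and the checksum offset from
 * the IP header so that the normal CHECKSUM_PARTIAL path (hardware
 * offload or skb_checksum_help()) can finish the job; the dev_queue_xmit()
 * hunk below calls it for exactly that reason.
 */
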
 /**
  *     dev_queue_xmit - transmit a buffer
  *     @skb: buffer to transmit
@@ -1320,9 +1469,19 @@ int dev_queue_xmit(struct sk_buff *skb)
        struct Qdisc *q;
        int rc = -ENOMEM;
 
+       /* If a checksum-deferred packet is forwarded to a device that needs a
+        * checksum, correct the pointers and force checksumming.
+        */
+       if (skb_checksum_setup(skb))
+               goto out_kfree_skb;
+
+       /* GSO will handle the following emulations directly. */
+       if (netif_needs_gso(dev, skb))
+               goto gso;
+
        if (skb_shinfo(skb)->frag_list &&
            !(dev->features & NETIF_F_FRAGLIST) &&
-           __skb_linearize(skb, GFP_ATOMIC))
+           __skb_linearize(skb))
                goto out_kfree_skb;
 
        /* Fragmented skb is linearized if device does not support SG,
@@ -1331,25 +1490,26 @@ int dev_queue_xmit(struct sk_buff *skb)
         */
        if (skb_shinfo(skb)->nr_frags &&
            (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
-           __skb_linearize(skb, GFP_ATOMIC))
+           __skb_linearize(skb))
                goto out_kfree_skb;
 
        /* If packet is not checksummed and device does not support
         * checksumming for this protocol, complete checksumming here.
         */
-       if (skb->ip_summed == CHECKSUM_HW &&
-           (!(dev->features & (NETIF_F_HW_CSUM | NETIF_F_NO_CSUM)) &&
+       if (skb->ip_summed == CHECKSUM_PARTIAL &&
+           (!(dev->features & NETIF_F_GEN_CSUM) &&
             (!(dev->features & NETIF_F_IP_CSUM) ||
              skb->protocol != htons(ETH_P_IP))))
-               if (skb_checksum_help(skb, 0))
+               if (skb_checksum_help(skb))
                        goto out_kfree_skb;
 
+gso:
        spin_lock_prefetch(&dev->queue_lock);
 
        /* Disable soft irqs for various locks below. Also 
         * stops preemption for RCU. 
         */
-       local_bh_disable(); 
+       rcu_read_lock_bh(); 
 
        /* Updates of qdisc are serialized by queue_lock. 
         * The struct Qdisc which is pointed to by qdisc is now a 
@@ -1370,21 +1530,23 @@ int dev_queue_xmit(struct sk_buff *skb)
        if (q->enqueue) {
                /* Grab device queue */
                spin_lock(&dev->queue_lock);
+               q = dev->qdisc;
+               if (q->enqueue) {
+                       rc = q->enqueue(skb, q);
+                       qdisc_run(dev);
+                       spin_unlock(&dev->queue_lock);
 
-               rc = q->enqueue(skb, q);
-
-               qdisc_run(dev);
-
+                       rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
+                       goto out;
+               }
                spin_unlock(&dev->queue_lock);
-               rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
-               goto out;
        }
 
        /* The device has no queue. Common case for software devices:
           loopback, all the sorts of tunnels...
 
-          Really, it is unlikely that xmit_lock protection is necessary here.
-          (f.e. loopback and IP tunnels are clean ignoring statistics
+          Really, it is unlikely that netif_tx_lock protection is necessary
+          here.  (f.e. loopback and IP tunnels are clean ignoring statistics
           counters.)
           However, it is possible, that they rely on protection
           made by us here.
@@ -1400,11 +1562,8 @@ int dev_queue_xmit(struct sk_buff *skb)
                        HARD_TX_LOCK(dev, cpu);
 
                        if (!netif_queue_stopped(dev)) {
-                               if (netdev_nit)
-                                       dev_queue_xmit_nit(skb, dev);
-
                                rc = 0;
-                               if (!dev->hard_start_xmit(skb, dev)) {
+                               if (!dev_hard_start_xmit(skb, dev)) {
                                        HARD_TX_UNLOCK(dev);
                                        goto out;
                                }
@@ -1423,13 +1582,13 @@ int dev_queue_xmit(struct sk_buff *skb)
        }
 
        rc = -ENETDOWN;
-       local_bh_enable();
+       rcu_read_unlock_bh();
 
 out_kfree_skb:
        kfree_skb(skb);
        return rc;
 out:
-       local_bh_enable();
+       rcu_read_unlock_bh();
        return rc;
 }
 
@@ -1523,26 +1682,10 @@ static inline struct net_device *skb_bond(struct sk_buff *skb)
        struct net_device *dev = skb->dev;
 
        if (dev->master) {
-               /*
-                * On bonding slaves other than the currently active
-                * slave, suppress duplicates except for 802.3ad
-                * ETH_P_SLOW and alb non-mcast/bcast.
-                */
-               if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
-                       if (dev->master->priv_flags & IFF_MASTER_ALB) {
-                               if (skb->pkt_type != PACKET_BROADCAST &&
-                                   skb->pkt_type != PACKET_MULTICAST)
-                                       goto keep;
-                       }
-
-                       if (dev->master->priv_flags & IFF_MASTER_8023AD &&
-                           skb->protocol == __constant_htons(ETH_P_SLOW))
-                               goto keep;
-               
+               if (skb_bond_should_drop(skb)) {
                        kfree_skb(skb);
                        return NULL;
                }
-keep:
                skb->dev = dev->master;
        }
 
@@ -1649,8 +1792,8 @@ static int ing_filter(struct sk_buff *skb)
        if (dev->qdisc_ingress) {
                __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
                if (MAX_RED_LOOP < ttl++) {
-                       printk("Redir loop detected Dropping packet (%s->%s)\n",
-                               skb->input_dev->name, skb->dev->name);
+                       printk(KERN_WARNING "Redir loop detected Dropping packet (%d->%d)\n",
+                               skb->iif, skb->dev->ifindex);
                        return TC_ACT_SHOT;
                }
 
@@ -1658,10 +1801,10 @@ static int ing_filter(struct sk_buff *skb)
 
                skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
 
-               spin_lock(&dev->ingress_lock);
+               spin_lock(&dev->queue_lock);
                if ((q = dev->qdisc_ingress) != NULL)
                        result = q->enqueue(skb, q);
-               spin_unlock(&dev->ingress_lock);
+               spin_unlock(&dev->queue_lock);
 
        }
 
@@ -1674,7 +1817,7 @@ int netif_receive_skb(struct sk_buff *skb)
        struct packet_type *ptype, *pt_prev;
        struct net_device *orig_dev;
        int ret = NET_RX_DROP;
-       unsigned short type;
+       __be16 type;
 
        /* if we've gotten here through NAPI, check netpoll */
        if (skb->dev->poll && netpoll_rx(skb))
@@ -1683,8 +1826,8 @@ int netif_receive_skb(struct sk_buff *skb)
        if (!skb->tstamp.off_sec)
                net_timestamp(skb);
 
-       if (!skb->input_dev)
-               skb->input_dev = skb->dev;
+       if (!skb->iif)
+               skb->iif = skb->dev->ifindex;
 
        orig_dev = skb_bond(skb);
 
@@ -1707,6 +1850,19 @@ int netif_receive_skb(struct sk_buff *skb)
        }
 #endif
 
+#ifdef CONFIG_XEN
+       switch (skb->ip_summed) {
+       case CHECKSUM_UNNECESSARY:
+               skb->proto_data_valid = 1;
+               break;
+       case CHECKSUM_PARTIAL:
+               /* XXX Implement me. */
+       default:
+               skb->proto_data_valid = 0;
+               break;
+       }
+#endif
+
        list_for_each_entry_rcu(ptype, &ptype_all, list) {
                if (!ptype->dev || ptype->dev == skb->dev) {
                        if (pt_prev) 
@@ -1734,8 +1890,6 @@ int netif_receive_skb(struct sk_buff *skb)
 ncls:
 #endif
 
-       handle_diverter(skb);
-
        if (handle_bridge(&skb, &pt_prev, &ret, orig_dev))
                goto out;
 
@@ -1847,6 +2001,19 @@ static void net_rx_action(struct softirq_action *h)
                }
        }
 out:
+#ifdef CONFIG_NET_DMA
+       /*
+        * There may not be any more sk_buffs coming right now, so push
+        * any pending DMA copies to hardware
+        */
+       if (net_dma_client) {
+               struct dma_chan *chan;
+               rcu_read_lock();
+               list_for_each_entry_rcu(chan, &net_dma_client->channels, client_node)
+                       dma_async_memcpy_issue_pending(chan);
+               rcu_read_unlock();
+       }
+#endif
        local_irq_enable();
        return;
 
@@ -2793,16 +2960,12 @@ int register_netdevice(struct net_device *dev)
        BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
 
        spin_lock_init(&dev->queue_lock);
-       spin_lock_init(&dev->xmit_lock);
+       spin_lock_init(&dev->_xmit_lock);
        dev->xmit_lock_owner = -1;
 #ifdef CONFIG_NET_CLS_ACT
        spin_lock_init(&dev->ingress_lock);
 #endif
 
-       ret = alloc_divert_blk(dev);
-       if (ret)
-               goto out;
-
        dev->iflink = -1;
 
        /* Init, if this function is available */
@@ -2811,13 +2974,13 @@ int register_netdevice(struct net_device *dev)
                if (ret) {
                        if (ret > 0)
                                ret = -EIO;
-                       goto out_err;
+                       goto out;
                }
        }
  
        if (!dev_valid_name(dev->name)) {
                ret = -EINVAL;
-               goto out_err;
+               goto out;
        }
 
        dev->ifindex = dev_new_index();
@@ -2831,16 +2994,14 @@ int register_netdevice(struct net_device *dev)
                        = hlist_entry(p, struct net_device, name_hlist);
                if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
                        ret = -EEXIST;
-                       goto out_err;
+                       goto out;
                }
        }
 
        /* Fix illegal SG+CSUM combinations. */
        if ((dev->features & NETIF_F_SG) &&
-           !(dev->features & (NETIF_F_IP_CSUM |
-                              NETIF_F_NO_CSUM |
-                              NETIF_F_HW_CSUM))) {
-               printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
+           !(dev->features & NETIF_F_ALL_CSUM)) {
+               printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
                       dev->name);
                dev->features &= ~NETIF_F_SG;
        }
@@ -2848,7 +3009,7 @@ int register_netdevice(struct net_device *dev)
        /* TSO requires that SG is present as well. */
        if ((dev->features & NETIF_F_TSO) &&
            !(dev->features & NETIF_F_SG)) {
-               printk("%s: Dropping NETIF_F_TSO since no SG feature.\n",
+               printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
                       dev->name);
                dev->features &= ~NETIF_F_TSO;
        }
@@ -2877,7 +3038,7 @@ int register_netdevice(struct net_device *dev)
 
        ret = netdev_register_sysfs(dev);
        if (ret)
-               goto out_err;
+               goto out;
        dev->reg_state = NETREG_REGISTERED;
 
        /*
@@ -2904,9 +3065,6 @@ int register_netdevice(struct net_device *dev)
 
 out:
        return ret;
-out_err:
-       free_divert_blk(dev);
-       goto out;
 }
 
 /**
@@ -2938,15 +3096,6 @@ int register_netdev(struct net_device *dev)
                        goto out;
        }
        
-       /*
-        * Back compatibility hook. Kill this one in 2.5
-        */
-       if (dev->name[0] == 0 || dev->name[0] == ' ') {
-               err = dev_alloc_name(dev, "eth%d");
-               if (err < 0)
-                       goto out;
-       }
-
        err = register_netdevice(dev);
 out:
        rtnl_unlock();
@@ -3030,7 +3179,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
 static DEFINE_MUTEX(net_todo_run_mutex);
 void netdev_run_todo(void)
 {
-       struct list_head list = LIST_HEAD_INIT(list);
+       struct list_head list;
 
        /* Need to guard against multiple cpu's getting out of order. */
        mutex_lock(&net_todo_run_mutex);
@@ -3045,9 +3194,9 @@ void netdev_run_todo(void)
 
        /* Snapshot list, allow later requests */
        spin_lock(&net_todo_list_lock);
-       list_splice_init(&net_todo_list, &list);
+       list_replace_init(&net_todo_list, &list);
        spin_unlock(&net_todo_list_lock);
-               
+
                struct net_device *dev
                        = list_entry(list.next, struct net_device, todo_list);
        while (!list_empty(&list)) {
                struct net_device *dev
                        = list_entry(list.next, struct net_device, todo_list);
@@ -3098,13 +3247,15 @@ struct net_device *alloc_netdev(int sizeof_priv, const char *name,
        struct net_device *dev;
        int alloc_size;
 
        struct net_device *dev;
        int alloc_size;
 
+       BUG_ON(strlen(name) >= sizeof(dev->name));
+
        /* ensure 32-byte alignment of both the device and private area */
        alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
        alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
 
        p = kzalloc(alloc_size, GFP_KERNEL);
        if (!p) {
        /* ensure 32-byte alignment of both the device and private area */
        alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
        alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
 
        p = kzalloc(alloc_size, GFP_KERNEL);
        if (!p) {
-               printk(KERN_ERR "alloc_dev: Unable to allocate device.\n");
+               printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
                return NULL;
        }
 
                return NULL;
        }
 
@@ -3230,8 +3381,6 @@ int unregister_netdevice(struct net_device *dev)
        /* Notifier chain MUST detach us from master device. */
        BUG_TRAP(!dev->master);
 
        /* Notifier chain MUST detach us from master device. */
        BUG_TRAP(!dev->master);
 
-       free_divert_blk(dev);
-
        /* Finish processing unregister after unlock */
        net_set_todo(dev);
 
        /* Finish processing unregister after unlock */
        net_set_todo(dev);
 
@@ -3262,7 +3411,6 @@ void unregister_netdev(struct net_device *dev)
 
 EXPORT_SYMBOL(unregister_netdev);
 
 
 static int dev_cpu_callback(struct notifier_block *nfb,
                            unsigned long action,
                            void *ocpu)
 static int dev_cpu_callback(struct notifier_block *nfb,
                            unsigned long action,
                            void *ocpu)
@@ -3306,8 +3454,84 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 
        return NOTIFY_OK;
 }
 
 
 
+/**
+ * net_dma_rebalance - redistribute the allocated DMA channels among the online CPUs
+ * This is called when the number of channels allocated to the net_dma_client
+ * changes.  The net_dma_client tries to have one DMA channel per CPU.
+ */
+static void net_dma_rebalance(void)
+{
+       unsigned int cpu, i, n;
+       struct dma_chan *chan;
+
+       if (net_dma_count == 0) {
+               for_each_online_cpu(cpu)
+                       rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
+               return;
+       }
+
+       i = 0;
+       cpu = first_cpu(cpu_online_map);
+
+       rcu_read_lock();
+       list_for_each_entry(chan, &net_dma_client->channels, client_node) {
+               n = ((num_online_cpus() / net_dma_count)
+                  + (i < (num_online_cpus() % net_dma_count) ? 1 : 0));
+
+               while(n) {
+                       per_cpu(softnet_data, cpu).net_dma = chan;
+                       cpu = next_cpu(cpu, cpu_online_map);
+                       n--;
+               }
+               i++;
+       }
+       rcu_read_unlock();
+}
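
/*
 * Worked example, illustrative only: with 8 online CPUs and 3 channels,
 * num_online_cpus() / net_dma_count is 2 with a remainder of 2, so the
 * loop above hands the first two channels 3 CPUs each and the last
 * channel the remaining 2, leaving every online CPU with exactly one
 * channel in its softnet_data.
 */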
+
+/**
+ * netdev_dma_event - event callback for the net_dma_client
+ * @client: should always be net_dma_client
+ * @chan: DMA channel for the event
+ * @event: event type
+ */
+static void netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
+       enum dma_event event)
+{
+       spin_lock(&net_dma_event_lock);
+       switch (event) {
+       case DMA_RESOURCE_ADDED:
+               net_dma_count++;
+               net_dma_rebalance();
+               break;
+       case DMA_RESOURCE_REMOVED:
+               net_dma_count--;
+               net_dma_rebalance();
+               break;
+       default:
+               break;
+       }
+       spin_unlock(&net_dma_event_lock);
+}
+
+/**
+ * netdev_dma_register - register the networking subsystem as a DMA client
+ */
+static int __init netdev_dma_register(void)
+{
+       spin_lock_init(&net_dma_event_lock);
+       net_dma_client = dma_async_client_register(netdev_dma_event);
+       if (net_dma_client == NULL)
+               return -ENOMEM;
+
+       dma_async_client_chan_request(net_dma_client, num_online_cpus());
+       return 0;
+}
+
+#else
+static int __init netdev_dma_register(void) { return -ENODEV; }
+#endif /* CONFIG_NET_DMA */
 
 /*
  *     Initialize the DEV module. At boot time this walks the device list and
@@ -3326,8 +3550,6 @@ static int __init net_dev_init(void)
 
        BUG_ON(!dev_boot_phase);
 
-       net_random_init();
-
        if (dev_proc_init())
                goto out;
 
@@ -3361,6 +3583,8 @@ static int __init net_dev_init(void)
                atomic_set(&queue->backlog_dev.refcnt, 1);
        }
 
+       netdev_dma_register();
+
        dev_boot_phase = 0;
 
        open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
@@ -3379,7 +3603,6 @@ subsys_initcall(net_dev_init);
 EXPORT_SYMBOL(__dev_get_by_index);
 EXPORT_SYMBOL(__dev_get_by_name);
 EXPORT_SYMBOL(__dev_remove_pack);
-EXPORT_SYMBOL(__skb_linearize);
 EXPORT_SYMBOL(dev_valid_name);
 EXPORT_SYMBOL(dev_add_pack);
 EXPORT_SYMBOL(dev_alloc_name);
@@ -3411,6 +3634,7 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
 EXPORT_SYMBOL(net_enable_timestamp);
 EXPORT_SYMBOL(net_disable_timestamp);
 EXPORT_SYMBOL(dev_get_flags);
+EXPORT_SYMBOL(skb_checksum_setup);
 
 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
 EXPORT_SYMBOL(br_handle_frame_hook);