return;
seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
-diff -Nurb linux-2.6.27.10-clickport/net/core/dev.c.orig trellis2/net/core/dev.c.orig
---- linux-2.6.27.10-clickport/net/core/dev.c.orig 1969-12-31 19:00:00.000000000 -0500
-+++ trellis2/net/core/dev.c.orig 2009-01-27 16:02:45.000000000 -0500
-@@ -0,0 +1,4951 @@
-+/*
-+ * NET3 Protocol independent device support routines.
-+ *
-+ * This program is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU General Public License
-+ * as published by the Free Software Foundation; either version
-+ * 2 of the License, or (at your option) any later version.
-+ *
-+ * Derived from the non IP parts of dev.c 1.0.19
-+ * Authors: Ross Biro
-+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
-+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
-+ *
-+ * Additional Authors:
-+ * Florian la Roche <rzsfl@rz.uni-sb.de>
-+ * Alan Cox <gw4pts@gw4pts.ampr.org>
-+ * David Hinds <dahinds@users.sourceforge.net>
-+ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
-+ * Adam Sulmicki <adam@cfar.umd.edu>
-+ * Pekka Riikonen <priikone@poesidon.pspt.fi>
-+ *
-+ * Changes:
-+ * D.J. Barrow : Fixed bug where dev->refcnt gets set
-+ * to 2 if register_netdev gets called
-+ * before net_dev_init & also removed a
-+ * few lines of code in the process.
-+ * Alan Cox : device private ioctl copies fields back.
-+ * Alan Cox : Transmit queue code does relevant
-+ * stunts to keep the queue safe.
-+ * Alan Cox : Fixed double lock.
-+ * Alan Cox : Fixed promisc NULL pointer trap
-+ * ???????? : Support the full private ioctl range
-+ * Alan Cox : Moved ioctl permission check into
-+ * drivers
-+ * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
-+ * Alan Cox : 100 backlog just doesn't cut it when
-+ * you start doing multicast video 8)
-+ * Alan Cox : Rewrote net_bh and list manager.
-+ * Alan Cox : Fix ETH_P_ALL echoback lengths.
-+ * Alan Cox : Took out transmit every packet pass
-+ * Saved a few bytes in the ioctl handler
-+ * Alan Cox : Network driver sets packet type before
-+ * calling netif_rx. Saves a function
-+ * call a packet.
-+ * Alan Cox : Hashed net_bh()
-+ * Richard Kooijman: Timestamp fixes.
-+ * Alan Cox : Wrong field in SIOCGIFDSTADDR
-+ * Alan Cox : Device lock protection.
-+ * Alan Cox : Fixed nasty side effect of device close
-+ * changes.
-+ * Rudi Cilibrasi : Pass the right thing to
-+ * set_mac_address()
-+ * Dave Miller : 32bit quantity for the device lock to
-+ * make it work out on a Sparc.
-+ * Bjorn Ekwall : Added KERNELD hack.
-+ * Alan Cox : Cleaned up the backlog initialise.
-+ * Craig Metz : SIOCGIFCONF fix if space for under
-+ * 1 device.
-+ * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
-+ * is no device open function.
-+ * Andi Kleen : Fix error reporting for SIOCGIFCONF
-+ * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
-+ * Cyrus Durgin : Cleaned for KMOD
-+ * Adam Sulmicki : Bug Fix : Network Device Unload
-+ * A network device unload needs to purge
-+ * the backlog queue.
-+ * Paul Rusty Russell : SIOCSIFNAME
-+ * Pekka Riikonen : Netdev boot-time settings code
-+ * Andrew Morton : Make unregister_netdevice wait
-+ * indefinitely on dev->refcnt
-+ * J Hadi Salim : - Backlog queue sampling
-+ * - netif_rx() feedback
-+ */
-+
-+#include <asm/uaccess.h>
-+#include <asm/system.h>
-+#include <linux/bitops.h>
-+#include <linux/capability.h>
-+#include <linux/cpu.h>
-+#include <linux/types.h>
-+#include <linux/kernel.h>
-+#include <linux/sched.h>
-+#include <linux/mutex.h>
-+#include <linux/string.h>
-+#include <linux/mm.h>
-+#include <linux/socket.h>
-+#include <linux/sockios.h>
-+#include <linux/errno.h>
-+#include <linux/interrupt.h>
-+#include <linux/if_ether.h>
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/ethtool.h>
-+#include <linux/notifier.h>
-+#include <linux/skbuff.h>
-+#include <net/net_namespace.h>
-+#include <net/sock.h>
-+#include <linux/rtnetlink.h>
-+#include <linux/proc_fs.h>
-+#include <linux/seq_file.h>
-+#include <linux/stat.h>
-+#include <linux/if_bridge.h>
-+#include <linux/if_macvlan.h>
-+#include <net/dst.h>
-+#include <net/pkt_sched.h>
-+#include <net/checksum.h>
-+#include <linux/highmem.h>
-+#include <linux/init.h>
-+#include <linux/kmod.h>
-+#include <linux/module.h>
-+#include <linux/kallsyms.h>
-+#include <linux/netpoll.h>
-+#include <linux/rcupdate.h>
-+#include <linux/delay.h>
-+#include <net/wext.h>
-+#include <net/iw_handler.h>
-+#include <asm/current.h>
-+#include <linux/audit.h>
-+#include <linux/dmaengine.h>
-+#include <linux/err.h>
-+#include <linux/ctype.h>
-+#include <linux/if_arp.h>
-+#include <linux/if_vlan.h>
-+#include <linux/ip.h>
-+#include <net/ip.h>
-+#include <linux/ipv6.h>
-+#include <linux/in.h>
-+#include <linux/jhash.h>
-+#include <linux/random.h>
-+#include <linux/vs_inet.h>
-+
-+#include "net-sysfs.h"
-+
-+/*
-+ * The list of packet types we will receive (as opposed to discard)
-+ * and the routines to invoke.
-+ *
-+ * Why 16? Because with 16 the only overlap we get on a hash of the
-+ * low nibble of the protocol value is RARP/SNAP/X.25.
-+ *
-+ * NOTE: That is no longer true with the addition of VLAN tags. Not
-+ * sure which should go first, but I bet it won't make much
-+ * difference if we are running VLANs. The good news is that
-+ * this protocol won't be in the list unless compiled in, so
-+ * the average user (w/out VLANs) will not be adversely affected.
-+ * --BLG
-+ *
-+ * 0800 IP
-+ * 8100 802.1Q VLAN
-+ * 0001 802.3
-+ * 0002 AX.25
-+ * 0004 802.2
-+ * 8035 RARP
-+ * 0005 SNAP
-+ * 0805 X.25
-+ * 0806 ARP
-+ * 8137 IPX
-+ * 0009 Localtalk
-+ * 86DD IPv6
-+ */
-+
-+#define PTYPE_HASH_SIZE (16)
-+#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
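For illustration (not part of the patch): a handler's bucket is just the low nibble of the host-order protocol value, which is why RARP (0x8035), SNAP (0x0005) and X.25 (0x0805) collide in bucket 5, while ARP (0x0806) lands alone in bucket 6. A minimal userspace sketch of the same computation:

#include <stdio.h>

#define PTYPE_HASH_SIZE 16
#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)

int main(void)
{
	/* Host-order protocol values; in the kernel the field is
	 * big-endian and goes through ntohs() first. */
	unsigned short arp = 0x0806, rarp = 0x8035, snap = 0x0005;

	printf("ARP  -> bucket %u\n", arp  & PTYPE_HASH_MASK); /* 6 */
	printf("RARP -> bucket %u\n", rarp & PTYPE_HASH_MASK); /* 5 */
	printf("SNAP -> bucket %u\n", snap & PTYPE_HASH_MASK); /* 5 */
	return 0;
}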
-+
-+static DEFINE_SPINLOCK(ptype_lock);
-+static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
-+static struct list_head ptype_all __read_mostly; /* Taps */
-+
-+#ifdef CONFIG_NET_DMA
-+struct net_dma {
-+ struct dma_client client;
-+ spinlock_t lock;
-+ cpumask_t channel_mask;
-+ struct dma_chan **channels;
-+};
-+
-+static enum dma_state_client
-+netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-+ enum dma_state state);
-+
-+static struct net_dma net_dma = {
-+ .client = {
-+ .event_callback = netdev_dma_event,
-+ },
-+};
-+#endif
-+
-+/*
-+ * The @dev_base_head list is protected by @dev_base_lock and the rtnl
-+ * semaphore.
-+ *
-+ * Pure readers hold dev_base_lock for reading.
-+ *
-+ * Writers must hold the rtnl semaphore while they loop through the
-+ * dev_base_head list, and hold dev_base_lock for writing when they do the
-+ * actual updates. This allows pure readers to access the list even
-+ * while a writer is preparing to update it.
-+ *
-+ * To put it another way, dev_base_lock is held for writing only to
-+ * protect against pure readers; the rtnl semaphore provides the
-+ * protection against other writers.
-+ *
-+ * See, for example usages, register_netdevice() and
-+ * unregister_netdevice(), which must be called with the rtnl
-+ * semaphore held.
-+ */
-+DEFINE_RWLOCK(dev_base_lock);
-+
-+EXPORT_SYMBOL(dev_base_lock);
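A minimal sketch of the pure-reader pattern the comment above describes (illustrative only, not part of the patch; assumes kernel context):

/* Sketch: count devices as a pure reader. Writers hold the rtnl
 * semaphore and take dev_base_lock for writing, so a reader only
 * needs the read side of dev_base_lock. */
static int count_netdevs(struct net *net)
{
	struct net_device *dev;
	int n = 0;

	read_lock(&dev_base_lock);
	for_each_netdev(net, dev)
		n++;
	read_unlock(&dev_base_lock);
	return n;
}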
-+
-+#define NETDEV_HASHBITS 8
-+#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
-+
-+static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
-+{
-+ unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
-+ return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
-+}
-+
-+static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
-+{
-+ return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
-+}
-+
-+/* Device list insertion */
-+static int list_netdevice(struct net_device *dev)
-+{
-+ struct net *net = dev_net(dev);
-+
-+ ASSERT_RTNL();
-+
-+ write_lock_bh(&dev_base_lock);
-+ list_add_tail(&dev->dev_list, &net->dev_base_head);
-+ hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
-+ hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
-+ write_unlock_bh(&dev_base_lock);
-+ return 0;
-+}
-+
-+/* Device list removal */
-+static void unlist_netdevice(struct net_device *dev)
-+{
-+ ASSERT_RTNL();
-+
-+ /* Unlink dev from the device chain */
-+ write_lock_bh(&dev_base_lock);
-+ list_del(&dev->dev_list);
-+ hlist_del(&dev->name_hlist);
-+ hlist_del(&dev->index_hlist);
-+ write_unlock_bh(&dev_base_lock);
-+}
-+
-+/* Click: input packet handlers, might steal packets from net_rx_action. */
-+static RAW_NOTIFIER_HEAD(net_in_chain);
-+
-+/*
-+ * Our notifier list
-+ */
-+
-+static RAW_NOTIFIER_HEAD(netdev_chain);
-+
-+/*
-+ * Device drivers call our routines to queue packets here. We empty the
-+ * queue in the local softnet handler.
-+ */
-+
-+DEFINE_PER_CPU(struct softnet_data, softnet_data);
-+
-+#ifdef CONFIG_LOCKDEP
-+/*
-+ * register_netdevice() inits txq->_xmit_lock and sets lockdep class
-+ * according to dev->type
-+ */
-+static const unsigned short netdev_lock_type[] =
-+ {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
-+ ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
-+ ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
-+ ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
-+ ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
-+ ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
-+ ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
-+ ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
-+ ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
-+ ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
-+ ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
-+ ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
-+ ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
-+ ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
-+ ARPHRD_NONE};
-+
-+static const char *netdev_lock_name[] =
-+ {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
-+ "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
-+ "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
-+ "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
-+ "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
-+ "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
-+ "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
-+ "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
-+ "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
-+ "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
-+ "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
-+ "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
-+ "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
-+ "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
-+ "_xmit_NONE"};
-+
-+static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
-+static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
-+
-+static inline unsigned short netdev_lock_pos(unsigned short dev_type)
-+{
-+ int i;
-+
-+ for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
-+ if (netdev_lock_type[i] == dev_type)
-+ return i;
-+ /* the last key is used by default */
-+ return ARRAY_SIZE(netdev_lock_type) - 1;
-+}
-+
-+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-+ unsigned short dev_type)
-+{
-+ int i;
-+
-+ i = netdev_lock_pos(dev_type);
-+ lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
-+ netdev_lock_name[i]);
-+}
-+
-+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-+{
-+ int i;
-+
-+ i = netdev_lock_pos(dev->type);
-+ lockdep_set_class_and_name(&dev->addr_list_lock,
-+ &netdev_addr_lock_key[i],
-+ netdev_lock_name[i]);
-+}
-+#else
-+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-+ unsigned short dev_type)
-+{
-+}
-+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-+{
-+}
-+#endif
-+
-+/*******************************************************************************
-+
-+ Protocol management and registration routines
-+
-+*******************************************************************************/
-+
-+/*
-+ * Add a protocol ID to the list. Now that the input handler is
-+ * smarter we can dispense with all the messy stuff that used to be
-+ * here.
-+ *
-+ * BEWARE!!! Protocol handlers that mangle input packets
-+ * MUST BE last in the hash buckets, and checking of protocol handlers
-+ * MUST start from the promiscuous ptype_all chain in net_bh.
-+ * This is true now; do not change it.
-+ * Explanation: if a packet-mangling protocol handler were
-+ * first on the list, it could not sense that the packet
-+ * is cloned and should be copied-on-write, so it would
-+ * change it in place and subsequent readers would get a broken packet.
-+ * --ANK (980803)
-+ */
-+
-+/**
-+ * dev_add_pack - add packet handler
-+ * @pt: packet type declaration
-+ *
-+ * Add a protocol handler to the networking stack. The passed &packet_type
-+ * is linked into kernel lists and may not be freed until it has been
-+ * removed from the kernel lists.
-+ *
-+ * This call does not sleep, therefore it cannot
-+ * guarantee that all CPUs that are in the middle of receiving packets
-+ * will see the new packet type (until the next received packet).
-+ */
-+
-+void dev_add_pack(struct packet_type *pt)
-+{
-+ int hash;
-+
-+ spin_lock_bh(&ptype_lock);
-+ if (pt->type == htons(ETH_P_ALL))
-+ list_add_rcu(&pt->list, &ptype_all);
-+ else {
-+ hash = ntohs(pt->type) & PTYPE_HASH_MASK;
-+ list_add_rcu(&pt->list, &ptype_base[hash]);
-+ }
-+ spin_unlock_bh(&ptype_lock);
-+}
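A sketch of how a module would use this API (the handler and its names are hypothetical):

/* Hypothetical tap for IPv4 frames. The packet_type must stay
 * allocated until dev_remove_pack() (and, for the lockless readers,
 * synchronize_net()) has run. */
static int example_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
			    struct packet_type *pt,
			    struct net_device *orig_dev)
{
	kfree_skb(skb);		/* we only received a clone */
	return NET_RX_SUCCESS;
}

static struct packet_type example_ipv4_pt = {
	.type = __constant_htons(ETH_P_IP),	/* hashes into ptype_base */
	.func = example_ipv4_rcv,
};

/* module init:  dev_add_pack(&example_ipv4_pt);
 * module exit:  dev_remove_pack(&example_ipv4_pt); */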
-+
-+/**
-+ * __dev_remove_pack - remove packet handler
-+ * @pt: packet type declaration
-+ *
-+ * Remove a protocol handler that was previously added to the kernel
-+ * protocol handlers by dev_add_pack(). The passed &packet_type is removed
-+ * from the kernel lists and can be freed or reused once this function
-+ * returns.
-+ *
-+ * The packet type might still be in use by receivers
-+ * and must not be freed until after all CPUs have gone
-+ * through a quiescent state.
-+ */
-+void __dev_remove_pack(struct packet_type *pt)
-+{
-+ struct list_head *head;
-+ struct packet_type *pt1;
-+
-+ spin_lock_bh(&ptype_lock);
-+
-+ if (pt->type == htons(ETH_P_ALL))
-+ head = &ptype_all;
-+ else
-+ head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
-+
-+ list_for_each_entry(pt1, head, list) {
-+ if (pt == pt1) {
-+ list_del_rcu(&pt->list);
-+ goto out;
-+ }
-+ }
-+
-+ printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
-+out:
-+ spin_unlock_bh(&ptype_lock);
-+}
-+/**
-+ * dev_remove_pack - remove packet handler
-+ * @pt: packet type declaration
-+ *
-+ * Remove a protocol handler that was previously added to the kernel
-+ * protocol handlers by dev_add_pack(). The passed &packet_type is removed
-+ * from the kernel lists and can be freed or reused once this function
-+ * returns.
-+ *
-+ * This call sleeps to guarantee that no CPU is looking at the packet
-+ * type after return.
-+ */
-+void dev_remove_pack(struct packet_type *pt)
-+{
-+ __dev_remove_pack(pt);
-+
-+ synchronize_net();
-+}
-+
-+/******************************************************************************
-+
-+ Device Boot-time Settings Routines
-+
-+*******************************************************************************/
-+
-+/* Boot time configuration table */
-+static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
-+
-+/**
-+ * netdev_boot_setup_add - add new setup entry
-+ * @name: name of the device
-+ * @map: configured settings for the device
-+ *
-+ * Adds a new setup entry to the dev_boot_setup list. The function
-+ * returns 0 on error and 1 on success. This is a generic routine for
-+ * all netdevices.
-+ */
-+static int netdev_boot_setup_add(char *name, struct ifmap *map)
-+{
-+ struct netdev_boot_setup *s;
-+ int i;
-+
-+ s = dev_boot_setup;
-+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
-+ if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
-+ memset(s[i].name, 0, sizeof(s[i].name));
-+ strlcpy(s[i].name, name, IFNAMSIZ);
-+ memcpy(&s[i].map, map, sizeof(s[i].map));
-+ break;
-+ }
-+ }
-+
-+ return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
-+}
-+
-+/**
-+ * netdev_boot_setup_check - check boot time settings
-+ * @dev: the netdevice
-+ *
-+ * Check boot time settings for the device.
-+ * Any settings found are applied to the device for use
-+ * later in device probing.
-+ * Returns 0 if no settings were found, 1 if they were.
-+ */
-+int netdev_boot_setup_check(struct net_device *dev)
-+{
-+ struct netdev_boot_setup *s = dev_boot_setup;
-+ int i;
-+
-+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
-+ if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
-+ !strcmp(dev->name, s[i].name)) {
-+ dev->irq = s[i].map.irq;
-+ dev->base_addr = s[i].map.base_addr;
-+ dev->mem_start = s[i].map.mem_start;
-+ dev->mem_end = s[i].map.mem_end;
-+ return 1;
-+ }
-+ }
-+ return 0;
-+}
-+
-+
-+/**
-+ * netdev_boot_base - get address from boot time settings
-+ * @prefix: prefix for network device
-+ * @unit: id for network device
-+ *
-+ * Check boot time settings for the base address of the device.
-+ * Any settings found are applied to the device for use
-+ * later in device probing.
-+ * Returns 0 if no settings were found.
-+ */
-+unsigned long netdev_boot_base(const char *prefix, int unit)
-+{
-+ const struct netdev_boot_setup *s = dev_boot_setup;
-+ char name[IFNAMSIZ];
-+ int i;
-+
-+ sprintf(name, "%s%d", prefix, unit);
-+
-+ /*
-+ * If device already registered then return base of 1
-+ * to indicate not to probe for this interface
-+ */
-+ if (__dev_get_by_name(&init_net, name))
-+ return 1;
-+
-+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
-+ if (!strcmp(name, s[i].name))
-+ return s[i].map.base_addr;
-+ return 0;
-+}
-+
-+/*
-+ * Saves at boot time configured settings for any netdevice.
-+ */
-+int __init netdev_boot_setup(char *str)
-+{
-+ int ints[5];
-+ struct ifmap map;
-+
-+ str = get_options(str, ARRAY_SIZE(ints), ints);
-+ if (!str || !*str)
-+ return 0;
-+
-+ /* Save settings */
-+ memset(&map, 0, sizeof(map));
-+ if (ints[0] > 0)
-+ map.irq = ints[1];
-+ if (ints[0] > 1)
-+ map.base_addr = ints[2];
-+ if (ints[0] > 2)
-+ map.mem_start = ints[3];
-+ if (ints[0] > 3)
-+ map.mem_end = ints[4];
-+
-+ /* Add new entry to the list */
-+ return netdev_boot_setup_add(str, &map);
-+}
-+
-+__setup("netdev=", netdev_boot_setup);
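Given the parse order above (irq, base_addr, mem_start, mem_end, then the remainder as the name), a kernel command line entry such as the following (values illustrative) would seed one dev_boot_setup slot:

	netdev=9,0x300,0xd0000,0xd4000,eth0

	->  map.irq       = 9
	    map.base_addr = 0x300
	    map.mem_start = 0xd0000
	    map.mem_end   = 0xd4000
	    name          = "eth0"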
-+
-+/*******************************************************************************
-+
-+ Device Interface Subroutines
-+
-+*******************************************************************************/
-+
-+/**
-+ * __dev_get_by_name - find a device by its name
-+ * @net: the applicable net namespace
-+ * @name: name to find
-+ *
-+ * Find an interface by name. Must be called under RTNL semaphore
-+ * or @dev_base_lock. If the name is found a pointer to the device
-+ * is returned. If the name is not found then %NULL is returned. The
-+ * reference counters are not incremented so the caller must be
-+ * careful with locks.
-+ */
-+
-+struct net_device *__dev_get_by_name(struct net *net, const char *name)
-+{
-+ struct hlist_node *p;
-+
-+ hlist_for_each(p, dev_name_hash(net, name)) {
-+ struct net_device *dev
-+ = hlist_entry(p, struct net_device, name_hlist);
-+ if (!strncmp(dev->name, name, IFNAMSIZ))
-+ return dev;
-+ }
-+ return NULL;
-+}
-+
-+/**
-+ * dev_get_by_name - find a device by its name
-+ * @net: the applicable net namespace
-+ * @name: name to find
-+ *
-+ * Find an interface by name. This can be called from any
-+ * context and does its own locking. The returned handle has
-+ * the usage count incremented and the caller must use dev_put() to
-+ * release it when it is no longer needed. %NULL is returned if no
-+ * matching device is found.
-+ */
-+
-+struct net_device *dev_get_by_name(struct net *net, const char *name)
-+{
-+ struct net_device *dev;
-+
-+ read_lock(&dev_base_lock);
-+ dev = __dev_get_by_name(net, name);
-+ if (dev)
-+ dev_hold(dev);
-+ read_unlock(&dev_base_lock);
-+ return dev;
-+}
-+
-+/**
-+ * __dev_get_by_index - find a device by its ifindex
-+ * @net: the applicable net namespace
-+ * @ifindex: index of device
-+ *
-+ * Search for an interface by index. Returns a pointer to the
-+ * device, or %NULL if it is not found. The device has not
-+ * had its reference counter increased so the caller must be careful
-+ * about locking. The caller must hold either the RTNL semaphore
-+ * or @dev_base_lock.
-+ */
-+
-+struct net_device *__dev_get_by_index(struct net *net, int ifindex)
-+{
-+ struct hlist_node *p;
-+
-+ hlist_for_each(p, dev_index_hash(net, ifindex)) {
-+ struct net_device *dev
-+ = hlist_entry(p, struct net_device, index_hlist);
-+ if (dev->ifindex == ifindex)
-+ return dev;
-+ }
-+ return NULL;
-+}
-+
-+
-+/**
-+ * dev_get_by_index - find a device by its ifindex
-+ * @net: the applicable net namespace
-+ * @ifindex: index of device
-+ *
-+ * Search for an interface by index. Returns a pointer to the
-+ * device, or NULL if it is not found. The device returned has
-+ * had a reference added and the pointer is safe until the user calls
-+ * dev_put to indicate they have finished with it.
-+ */
-+
-+struct net_device *dev_get_by_index(struct net *net, int ifindex)
-+{
-+ struct net_device *dev;
-+
-+ read_lock(&dev_base_lock);
-+ dev = __dev_get_by_index(net, ifindex);
-+ if (dev)
-+ dev_hold(dev);
-+ read_unlock(&dev_base_lock);
-+ return dev;
-+}
-+
-+/**
-+ * dev_getbyhwaddr - find a device by its hardware address
-+ * @net: the applicable net namespace
-+ * @type: media type of device
-+ * @ha: hardware address
-+ *
-+ * Search for an interface by MAC address. Returns a pointer to the
-+ * device, or NULL if it is not found. The caller must hold the
-+ * rtnl semaphore. The returned device has not had its ref count increased
-+ * and the caller must therefore be careful about locking.
-+ *
-+ * BUGS:
-+ * If the API was consistent this would be __dev_get_by_hwaddr
-+ */
-+
-+struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
-+{
-+ struct net_device *dev;
-+
-+ ASSERT_RTNL();
-+
-+ for_each_netdev(net, dev)
-+ if (dev->type == type &&
-+ !memcmp(dev->dev_addr, ha, dev->addr_len))
-+ return dev;
-+
-+ return NULL;
-+}
-+
-+EXPORT_SYMBOL(dev_getbyhwaddr);
-+
-+struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
-+{
-+ struct net_device *dev;
-+
-+ ASSERT_RTNL();
-+ for_each_netdev(net, dev)
-+ if (dev->type == type)
-+ return dev;
-+
-+ return NULL;
-+}
-+
-+EXPORT_SYMBOL(__dev_getfirstbyhwtype);
-+
-+struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
-+{
-+ struct net_device *dev;
-+
-+ rtnl_lock();
-+ dev = __dev_getfirstbyhwtype(net, type);
-+ if (dev)
-+ dev_hold(dev);
-+ rtnl_unlock();
-+ return dev;
-+}
-+
-+EXPORT_SYMBOL(dev_getfirstbyhwtype);
-+
-+/**
-+ * dev_get_by_flags - find any device with given flags
-+ * @net: the applicable net namespace
-+ * @if_flags: IFF_* values
-+ * @mask: bitmask of bits in if_flags to check
-+ *
-+ * Search for any interface with the given flags. Returns a pointer to
-+ * the device, or NULL if no device is found. The device returned has
-+ * had a reference added and the pointer is safe until the user calls
-+ * dev_put to indicate they have finished with it.
-+ */
-+
-+struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
-+{
-+ struct net_device *dev, *ret;
-+
-+ ret = NULL;
-+ read_lock(&dev_base_lock);
-+ for_each_netdev(net, dev) {
-+ if (((dev->flags ^ if_flags) & mask) == 0) {
-+ dev_hold(dev);
-+ ret = dev;
-+ break;
-+ }
-+ }
-+ read_unlock(&dev_base_lock);
-+ return ret;
-+}
-+
-+/**
-+ * dev_valid_name - check if name is okay for network device
-+ * @name: name string
-+ *
-+ * Network device names need to be valid file names
-+ * to allow sysfs to work. We also disallow any kind of
-+ * whitespace.
-+ */
-+int dev_valid_name(const char *name)
-+{
-+ if (*name == '\0')
-+ return 0;
-+ if (strlen(name) >= IFNAMSIZ)
-+ return 0;
-+ if (!strcmp(name, ".") || !strcmp(name, ".."))
-+ return 0;
-+
-+ while (*name) {
-+ if (*name == '/' || isspace(*name))
-+ return 0;
-+ name++;
-+ }
-+ return 1;
-+}
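Illustrative results of the checks above (not part of the patch):

/* dev_valid_name("eth0")   -> 1
 * dev_valid_name("")       -> 0   (empty)
 * dev_valid_name("..")     -> 0   (reserved directory name)
 * dev_valid_name("a/b")    -> 0   ('/' would break the sysfs path)
 * dev_valid_name("eth 0")  -> 0   (whitespace)
 * any name of IFNAMSIZ characters or more also fails with 0 */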
-+
-+/**
-+ * __dev_alloc_name - allocate a name for a device
-+ * @net: network namespace to allocate the device name in
-+ * @name: name format string
-+ * @buf: scratch buffer and result name string
-+ *
-+ * Passed a format string - eg "lt%d" - it will try to find a suitable
-+ * id. It scans the list of devices to build up a free map, then chooses
-+ * the first empty slot. The caller must hold the dev_base or rtnl lock
-+ * while allocating the name and adding the device in order to avoid
-+ * duplicates.
-+ * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
-+ * Returns the number of the unit assigned or a negative errno code.
-+ */
-+
-+static int __dev_alloc_name(struct net *net, const char *name, char *buf)
-+{
-+ int i = 0;
-+ const char *p;
-+ const int max_netdevices = 8*PAGE_SIZE;
-+ unsigned long *inuse;
-+ struct net_device *d;
-+
-+ p = strnchr(name, IFNAMSIZ-1, '%');
-+ if (p) {
-+ /*
-+ * Verify the string as this thing may have come from
-+ * the user. There must be either one "%d" and no other "%"
-+ * characters.
-+ */
-+ if (p[1] != 'd' || strchr(p + 2, '%'))
-+ return -EINVAL;
-+
-+ /* Use one page as a bit array of possible slots */
-+ inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
-+ if (!inuse)
-+ return -ENOMEM;
-+
-+ for_each_netdev(net, d) {
-+ if (!sscanf(d->name, name, &i))
-+ continue;
-+ if (i < 0 || i >= max_netdevices)
-+ continue;
-+
-+ /* avoid cases where sscanf is not exact inverse of printf */
-+ snprintf(buf, IFNAMSIZ, name, i);
-+ if (!strncmp(buf, d->name, IFNAMSIZ))
-+ set_bit(i, inuse);
-+ }
-+
-+ i = find_first_zero_bit(inuse, max_netdevices);
-+ free_page((unsigned long) inuse);
-+ }
-+
-+ snprintf(buf, IFNAMSIZ, name, i);
-+ if (!__dev_get_by_name(net, buf))
-+ return i;
-+
-+ /* It is possible to run out of possible slots
-+ * when the name is long and there isn't enough space left
-+ * for the digits, or if all bits are used.
-+ */
-+ return -ENFILE;
-+}
-+
-+/**
-+ * dev_alloc_name - allocate a name for a device
-+ * @dev: device
-+ * @name: name format string
-+ *
-+ * Passed a format string - eg "lt%d" - it will try to find a suitable
-+ * id. It scans the list of devices to build up a free map, then chooses
-+ * the first empty slot. The caller must hold the dev_base or rtnl lock
-+ * while allocating the name and adding the device in order to avoid
-+ * duplicates.
-+ * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
-+ * Returns the number of the unit assigned or a negative errno code.
-+ */
-+
-+int dev_alloc_name(struct net_device *dev, const char *name)
-+{
-+ char buf[IFNAMSIZ];
-+ struct net *net;
-+ int ret;
-+
-+ BUG_ON(!dev_net(dev));
-+ net = dev_net(dev);
-+ ret = __dev_alloc_name(net, name, buf);
-+ if (ret >= 0)
-+ strlcpy(dev->name, buf, IFNAMSIZ);
-+ return ret;
-+}
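A hedged usage sketch (the surrounding probe helper is hypothetical):

/* Hypothetical driver fragment: if eth0 and eth1 already exist,
 * dev_alloc_name() writes "eth2" into dev->name and returns 2. */
static int example_pick_name(struct net_device *dev)
{
	int unit = dev_alloc_name(dev, "eth%d");

	if (unit < 0)
		return unit;	/* -EINVAL: bad format; -ENFILE: no slot */
	return 0;
}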
-+
-+
-+/**
-+ * dev_change_name - change name of a device
-+ * @dev: device
-+ * @newname: name (or format string) must be at least IFNAMSIZ
-+ *
-+ * Change the name of a device. A format string such as "eth%d"
-+ * can be passed for wildcarding.
-+ */
-+int dev_change_name(struct net_device *dev, char *newname)
-+{
-+ char oldname[IFNAMSIZ];
-+ int err = 0;
-+ int ret;
-+ struct net *net;
-+
-+ ASSERT_RTNL();
-+ BUG_ON(!dev_net(dev));
-+
-+ net = dev_net(dev);
-+ if (dev->flags & IFF_UP)
-+ return -EBUSY;
-+
-+ if (!dev_valid_name(newname))
-+ return -EINVAL;
-+
-+ if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
-+ return 0;
-+
-+ memcpy(oldname, dev->name, IFNAMSIZ);
-+
-+ if (strchr(newname, '%')) {
-+ err = dev_alloc_name(dev, newname);
-+ if (err < 0)
-+ return err;
-+ strcpy(newname, dev->name);
-+ }
-+ else if (__dev_get_by_name(net, newname))
-+ return -EEXIST;
-+ else
-+ strlcpy(dev->name, newname, IFNAMSIZ);
-+
-+rollback:
-+ /* For now only devices in the initial network namespace
-+ * are in sysfs.
-+ */
-+ if (net == &init_net) {
-+ ret = device_rename(&dev->dev, dev->name);
-+ if (ret) {
-+ memcpy(dev->name, oldname, IFNAMSIZ);
-+ return ret;
-+ }
-+ }
-+
-+ write_lock_bh(&dev_base_lock);
-+ hlist_del(&dev->name_hlist);
-+ hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
-+ write_unlock_bh(&dev_base_lock);
-+
-+ ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
-+ ret = notifier_to_errno(ret);
-+
-+ if (ret) {
-+ if (err) {
-+ printk(KERN_ERR
-+ "%s: name change rollback failed: %d.\n",
-+ dev->name, ret);
-+ } else {
-+ err = ret;
-+ memcpy(dev->name, oldname, IFNAMSIZ);
-+ goto rollback;
-+ }
-+ }
-+
-+ return err;
-+}
-+
-+/**
-+ * netdev_features_change - device changes features
-+ * @dev: device to cause notification
-+ *
-+ * Called to indicate a device has changed features.
-+ */
-+void netdev_features_change(struct net_device *dev)
-+{
-+ call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
-+}
-+EXPORT_SYMBOL(netdev_features_change);
-+
-+/**
-+ * netdev_state_change - device changes state
-+ * @dev: device to cause notification
-+ *
-+ * Called to indicate a device has changed state. This function calls
-+ * the notifier chains for netdev_chain and sends a NEWLINK message
-+ * to the routing socket.
-+ */
-+void netdev_state_change(struct net_device *dev)
-+{
-+ if (dev->flags & IFF_UP) {
-+ call_netdevice_notifiers(NETDEV_CHANGE, dev);
-+ rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
-+ }
-+}
-+
-+void netdev_bonding_change(struct net_device *dev)
-+{
-+ call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
-+}
-+EXPORT_SYMBOL(netdev_bonding_change);
-+
-+/**
-+ * dev_load - load a network module
-+ * @net: the applicable net namespace
-+ * @name: name of interface
-+ *
-+ * If a network interface is not present and the process has suitable
-+ * privileges this function loads the module. If module loading is not
-+ * available in this kernel then it becomes a nop.
-+ */
-+
-+void dev_load(struct net *net, const char *name)
-+{
-+ struct net_device *dev;
-+
-+ read_lock(&dev_base_lock);
-+ dev = __dev_get_by_name(net, name);
-+ read_unlock(&dev_base_lock);
-+
-+ if (!dev && capable(CAP_SYS_MODULE))
-+ request_module("%s", name);
-+}
-+
-+/**
-+ * dev_open - prepare an interface for use.
-+ * @dev: device to open
-+ *
-+ * Takes a device from down to up state. The device's private open
-+ * function is invoked and then the multicast lists are loaded. Finally
-+ * the device is moved into the up state and a %NETDEV_UP message is
-+ * sent to the netdev notifier chain.
-+ *
-+ * Calling this function on an active interface is a nop. On a failure
-+ * a negative errno code is returned.
-+ */
-+int dev_open(struct net_device *dev)
-+{
-+ int ret = 0;
-+
-+ ASSERT_RTNL();
-+
-+ /*
-+ * Is it already up?
-+ */
-+
-+ if (dev->flags & IFF_UP)
-+ return 0;
-+
-+ /*
-+ * Is it even present?
-+ */
-+ if (!netif_device_present(dev))
-+ return -ENODEV;
-+
-+ /*
-+ * Call device private open method
-+ */
-+ set_bit(__LINK_STATE_START, &dev->state);
-+
-+ if (dev->validate_addr)
-+ ret = dev->validate_addr(dev);
-+
-+ if (!ret && dev->open)
-+ ret = dev->open(dev);
-+
-+ /*
-+ * If it went open OK then:
-+ */
-+
-+ if (ret)
-+ clear_bit(__LINK_STATE_START, &dev->state);
-+ else {
-+ /*
-+ * Set the flags.
-+ */
-+ dev->flags |= IFF_UP;
-+
-+ /*
-+ * Initialize multicasting status
-+ */
-+ dev_set_rx_mode(dev);
-+
-+ /*
-+ * Wakeup transmit queue engine
-+ */
-+ dev_activate(dev);
-+
-+ /*
-+ * ... and announce new interface.
-+ */
-+ call_netdevice_notifiers(NETDEV_UP, dev);
-+ }
-+
-+ return ret;
-+}
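Since dev_open() asserts the RTNL semaphore, a caller outside the usual ioctl path would wrap it as in this sketch (helper name hypothetical):

static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);	/* 0 on success or if already IFF_UP */
	rtnl_unlock();
	return err;
}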
-+
-+/**
-+ * dev_close - shutdown an interface.
-+ * @dev: device to shutdown
-+ *
-+ * This function moves an active device into down state. A
-+ * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
-+ * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
-+ * chain.
-+ */
-+int dev_close(struct net_device *dev)
-+{
-+ ASSERT_RTNL();
-+
-+ might_sleep();
-+
-+ if (!(dev->flags & IFF_UP))
-+ return 0;
-+
-+ /*
-+ * Tell people we are going down, so that they can
-+ * prepare for it while the device is still operating.
-+ */
-+ call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
-+
-+ clear_bit(__LINK_STATE_START, &dev->state);
-+
-+ /* Synchronize to scheduled poll. We cannot touch poll list,
-+ * it can be even on different cpu. So just clear netif_running().
-+ *
-+ * dev->stop() will invoke napi_disable() on all of its
-+ * napi_struct instances on this device.
-+ */
-+ smp_mb__after_clear_bit(); /* Commit netif_running(). */
-+
-+ dev_deactivate(dev);
-+
-+ /*
-+ * Call the device specific close. This cannot fail.
-+ * Only if device is UP
-+ *
-+ * We allow it to be called even after a DETACH hot-plug
-+ * event.
-+ */
-+ if (dev->stop)
-+ dev->stop(dev);
-+
-+ /*
-+ * Device is now down.
-+ */
-+
-+ dev->flags &= ~IFF_UP;
-+
-+ /*
-+ * Tell people we are down
-+ */
-+ call_netdevice_notifiers(NETDEV_DOWN, dev);
-+
-+ return 0;
-+}
-+
-+
-+/**
-+ * dev_disable_lro - disable Large Receive Offload on a device
-+ * @dev: device
-+ *
-+ * Disable Large Receive Offload (LRO) on a net device. Must be
-+ * called under RTNL. This is needed if received packets may be
-+ * forwarded to another interface.
-+ */
-+void dev_disable_lro(struct net_device *dev)
-+{
-+ if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
-+ dev->ethtool_ops->set_flags) {
-+ u32 flags = dev->ethtool_ops->get_flags(dev);
-+ if (flags & ETH_FLAG_LRO) {
-+ flags &= ~ETH_FLAG_LRO;
-+ dev->ethtool_ops->set_flags(dev, flags);
-+ }
-+ }
-+ WARN_ON(dev->features & NETIF_F_LRO);
-+}
-+EXPORT_SYMBOL(dev_disable_lro);
-+
-+
-+static int dev_boot_phase = 1;
-+
-+/*
-+ * Device change register/unregister. These are not inline or static
-+ * as we export them to the world.
-+ */
-+
-+/**
-+ * register_netdevice_notifier - register a network notifier block
-+ * @nb: notifier
-+ *
-+ * Register a notifier to be called when network device events occur.
-+ * The notifier passed is linked into the kernel structures and must
-+ * not be reused until it has been unregistered. A negative errno code
-+ * is returned on a failure.
-+ *
-+ * When registered, all registration and up events are replayed
-+ * to the new notifier to allow the device to have a race-free
-+ * view of the network device list.
-+ */
-+
-+int register_netdevice_notifier(struct notifier_block *nb)
-+{
-+ struct net_device *dev;
-+ struct net_device *last;
-+ struct net *net;
-+ int err;
-+
-+ rtnl_lock();
-+ err = raw_notifier_chain_register(&netdev_chain, nb);
-+ if (err)
-+ goto unlock;
-+ if (dev_boot_phase)
-+ goto unlock;
-+ for_each_net(net) {
-+ for_each_netdev(net, dev) {
-+ err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
-+ err = notifier_to_errno(err);
-+ if (err)
-+ goto rollback;
-+
-+ if (!(dev->flags & IFF_UP))
-+ continue;
-+
-+ nb->notifier_call(nb, NETDEV_UP, dev);
-+ }
-+ }
-+
-+unlock:
-+ rtnl_unlock();
-+ return err;
-+
-+rollback:
-+ last = dev;
-+ for_each_net(net) {
-+ for_each_netdev(net, dev) {
-+ if (dev == last)
-+ break;
-+
-+ if (dev->flags & IFF_UP) {
-+ nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
-+ nb->notifier_call(nb, NETDEV_DOWN, dev);
-+ }
-+ nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
-+ }
-+ }
-+
-+ raw_notifier_chain_unregister(&netdev_chain, nb);
-+ goto unlock;
-+}
-+
-+/**
-+ * unregister_netdevice_notifier - unregister a network notifier block
-+ * @nb: notifier
-+ *
-+ * Unregister a notifier previously registered by
-+ * register_netdevice_notifier(). The notifier is unlinked from the
-+ * kernel structures and may then be reused. A negative errno code
-+ * is returned on a failure.
-+ */
-+
-+int unregister_netdevice_notifier(struct notifier_block *nb)
-+{
-+ int err;
-+
-+ rtnl_lock();
-+ err = raw_notifier_chain_unregister(&netdev_chain, nb);
-+ rtnl_unlock();
-+ return err;
-+}
-+
-+/**
-+ * call_netdevice_notifiers - call all network notifier blocks
-+ * @val: value passed unmodified to notifier function
-+ * @dev: net_device pointer passed unmodified to notifier function
-+ *
-+ * Call all network notifier blocks. Parameters and return value
-+ * are as for raw_notifier_call_chain().
-+ */
-+
-+int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
-+{
-+ return raw_notifier_call_chain(&netdev_chain, val, dev);
-+}
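A sketch of a consumer of this notifier chain (handler names hypothetical); note the replay behaviour documented at register_netdevice_notifier() above:

/* Hypothetical notifier: log interfaces coming up. */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		printk(KERN_INFO "%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* module init:  register_netdevice_notifier(&example_netdev_nb);
 * module exit:  unregister_netdevice_notifier(&example_netdev_nb); */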
-+
-+/* When > 0 there are consumers of rx skb time stamps */
-+static atomic_t netstamp_needed = ATOMIC_INIT(0);
-+
-+void net_enable_timestamp(void)
-+{
-+ atomic_inc(&netstamp_needed);
-+}
-+
-+void net_disable_timestamp(void)
-+{
-+ atomic_dec(&netstamp_needed);
-+}
-+
-+static inline void net_timestamp(struct sk_buff *skb)
-+{
-+ if (atomic_read(&netstamp_needed))
-+ __net_timestamp(skb);
-+ else
-+ skb->tstamp.tv64 = 0;
-+}
-+
-+/*
-+ * Support routine. Sends outgoing frames to any network
-+ * taps currently in use.
-+ */
-+
-+static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
-+{
-+ struct packet_type *ptype;
-+
-+ net_timestamp(skb);
-+
-+ rcu_read_lock();
-+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
-+ /* Never send packets back to the socket
-+ * they originated from - MvS (miquels@drinkel.ow.org)
-+ */
-+ if ((ptype->dev == dev || !ptype->dev) &&
-+ (ptype->af_packet_priv == NULL ||
-+ (struct sock *)ptype->af_packet_priv != skb->sk)) {
-+ struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
-+ if (!skb2)
-+ break;
-+
-+ /* skb->nh should be correctly
-+ set by the sender, so the second statement is
-+ just protection against buggy protocols.
-+ */
-+ skb_reset_mac_header(skb2);
-+
-+ if (skb_network_header(skb2) < skb2->data ||
-+ skb2->network_header > skb2->tail) {
-+ if (net_ratelimit())
-+ printk(KERN_CRIT "protocol %04x is "
-+ "buggy, dev %s\n",
-+ skb2->protocol, dev->name);
-+ skb_reset_network_header(skb2);
-+ }
-+
-+ skb2->transport_header = skb2->network_header;
-+ skb2->pkt_type = PACKET_OUTGOING;
-+ ptype->func(skb2, skb->dev, ptype, skb->dev);
-+ }
-+ }
-+ rcu_read_unlock();
-+}
-+
-+
-+static inline void __netif_reschedule(struct Qdisc *q)
-+{
-+ struct softnet_data *sd;
-+ unsigned long flags;
-+
-+ local_irq_save(flags);
-+ sd = &__get_cpu_var(softnet_data);
-+ q->next_sched = sd->output_queue;
-+ sd->output_queue = q;
-+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
-+ local_irq_restore(flags);
-+}
-+
-+void __netif_schedule(struct Qdisc *q)
-+{
-+ if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
-+ __netif_reschedule(q);
-+}
-+EXPORT_SYMBOL(__netif_schedule);
-+
-+void dev_kfree_skb_irq(struct sk_buff *skb)
-+{
-+ if (atomic_dec_and_test(&skb->users)) {
-+ struct softnet_data *sd;
-+ unsigned long flags;
-+
-+ local_irq_save(flags);
-+ sd = &__get_cpu_var(softnet_data);
-+ skb->next = sd->completion_queue;
-+ sd->completion_queue = skb;
-+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
-+ local_irq_restore(flags);
-+ }
-+}
-+EXPORT_SYMBOL(dev_kfree_skb_irq);
-+
-+void dev_kfree_skb_any(struct sk_buff *skb)
-+{
-+ if (in_irq() || irqs_disabled())
-+ dev_kfree_skb_irq(skb);
-+ else
-+ dev_kfree_skb(skb);
-+}
-+EXPORT_SYMBOL(dev_kfree_skb_any);
-+
-+
-+/**
-+ * netif_device_detach - mark device as removed
-+ * @dev: network device
-+ *
-+ * Mark device as removed from the system and therefore no longer available.
-+ */
-+void netif_device_detach(struct net_device *dev)
-+{
-+ if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
-+ netif_running(dev)) {
-+ netif_stop_queue(dev);
-+ }
-+}
-+EXPORT_SYMBOL(netif_device_detach);
-+
-+/**
-+ * netif_device_attach - mark device as attached
-+ * @dev: network device
-+ *
-+ * Mark device as attached to the system and restart it if needed.
-+ */
-+void netif_device_attach(struct net_device *dev)
-+{
-+ if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
-+ netif_running(dev)) {
-+ netif_wake_queue(dev);
-+ __netdev_watchdog_up(dev);
-+ }
-+}
-+EXPORT_SYMBOL(netif_device_attach);
-+
-+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
-+{
-+ return ((features & NETIF_F_GEN_CSUM) ||
-+ ((features & NETIF_F_IP_CSUM) &&
-+ protocol == htons(ETH_P_IP)) ||
-+ ((features & NETIF_F_IPV6_CSUM) &&
-+ protocol == htons(ETH_P_IPV6)));
-+}
-+
-+static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
-+{
-+ if (can_checksum_protocol(dev->features, skb->protocol))
-+ return true;
-+
-+ if (skb->protocol == htons(ETH_P_8021Q)) {
-+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-+ if (can_checksum_protocol(dev->features & dev->vlan_features,
-+ veh->h_vlan_encapsulated_proto))
-+ return true;
-+ }
-+
-+ return false;
-+}
-+
-+/*
-+ * Invalidate hardware checksum when packet is to be mangled, and
-+ * complete the checksum manually on the outgoing path.
-+ */
-+int skb_checksum_help(struct sk_buff *skb)
-+{
-+ __wsum csum;
-+ int ret = 0, offset;
-+
-+ if (skb->ip_summed == CHECKSUM_COMPLETE)
-+ goto out_set_summed;
-+
-+ if (unlikely(skb_shinfo(skb)->gso_size)) {
-+ /* Let GSO fix up the checksum. */
-+ goto out_set_summed;
-+ }
-+
-+ offset = skb->csum_start - skb_headroom(skb);
-+ BUG_ON(offset >= skb_headlen(skb));
-+ csum = skb_checksum(skb, offset, skb->len - offset, 0);
-+
-+ offset += skb->csum_offset;
-+ BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
-+
-+ if (skb_cloned(skb) &&
-+ !skb_clone_writable(skb, offset + sizeof(__sum16))) {
-+ ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-+ if (ret)
-+ goto out;
-+ }
-+
-+ *(__sum16 *)(skb->data + offset) = csum_fold(csum);
-+out_set_summed:
-+ skb->ip_summed = CHECKSUM_NONE;
-+out:
-+ return ret;
-+}
-+
-+/**
-+ * skb_gso_segment - Perform segmentation on skb.
-+ * @skb: buffer to segment
-+ * @features: features for the output path (see dev->features)
-+ *
-+ * This function segments the given skb and returns a list of segments.
-+ *
-+ * It may return NULL if the skb requires no segmentation. This is
-+ * only possible when GSO is used for verifying header integrity.
-+ */
-+struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
-+{
-+ struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
-+ struct packet_type *ptype;
-+ __be16 type = skb->protocol;
-+ int err;
-+
-+ BUG_ON(skb_shinfo(skb)->frag_list);
-+
-+ skb_reset_mac_header(skb);
-+ skb->mac_len = skb->network_header - skb->mac_header;
-+ __skb_pull(skb, skb->mac_len);
-+
-+ if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
-+ if (skb_header_cloned(skb) &&
-+ (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
-+ return ERR_PTR(err);
-+ }
-+
-+ rcu_read_lock();
-+ list_for_each_entry_rcu(ptype,
-+ &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
-+ if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
-+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
-+ err = ptype->gso_send_check(skb);
-+ segs = ERR_PTR(err);
-+ if (err || skb_gso_ok(skb, features))
-+ break;
-+ __skb_push(skb, (skb->data -
-+ skb_network_header(skb)));
-+ }
-+ segs = ptype->gso_segment(skb, features);
-+ break;
-+ }
-+ }
-+ rcu_read_unlock();
-+
-+ __skb_push(skb, skb->data - skb_mac_header(skb));
-+
-+ return segs;
-+}
-+
-+EXPORT_SYMBOL(skb_gso_segment);
-+
-+/* Take action when hardware reception checksum errors are detected. */
-+#ifdef CONFIG_BUG
-+void netdev_rx_csum_fault(struct net_device *dev)
-+{
-+ if (net_ratelimit()) {
-+ printk(KERN_ERR "%s: hw csum failure.\n",
-+ dev ? dev->name : "<unknown>");
-+ dump_stack();
-+ }
-+}
-+EXPORT_SYMBOL(netdev_rx_csum_fault);
-+#endif
-+
-+/* Actually, we should eliminate this check as soon as we know that:
-+ * 1. An IOMMU is present and allows mapping all the memory.
-+ * 2. No high memory really exists on this machine.
-+ */
-+
-+static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
-+{
-+#ifdef CONFIG_HIGHMEM
-+ int i;
-+
-+ if (dev->features & NETIF_F_HIGHDMA)
-+ return 0;
-+
-+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-+ if (PageHighMem(skb_shinfo(skb)->frags[i].page))
-+ return 1;
-+
-+#endif
-+ return 0;
-+}
-+
-+struct dev_gso_cb {
-+ void (*destructor)(struct sk_buff *skb);
-+};
-+
-+#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
-+
-+static void dev_gso_skb_destructor(struct sk_buff *skb)
-+{
-+ struct dev_gso_cb *cb;
-+
-+ do {
-+ struct sk_buff *nskb = skb->next;
-+
-+ skb->next = nskb->next;
-+ nskb->next = NULL;
-+ kfree_skb(nskb);
-+ } while (skb->next);
-+
-+ cb = DEV_GSO_CB(skb);
-+ if (cb->destructor)
-+ cb->destructor(skb);
-+}
-+
-+/**
-+ * dev_gso_segment - Perform emulated hardware segmentation on skb.
-+ * @skb: buffer to segment
-+ *
-+ * This function segments the given skb and stores the list of segments
-+ * in skb->next.
-+ */
-+static int dev_gso_segment(struct sk_buff *skb)
-+{
-+ struct net_device *dev = skb->dev;
-+ struct sk_buff *segs;
-+ int features = dev->features & ~(illegal_highdma(dev, skb) ?
-+ NETIF_F_SG : 0);
-+
-+ segs = skb_gso_segment(skb, features);
-+
-+ /* Verifying header integrity only. */
-+ if (!segs)
-+ return 0;
-+
-+ if (IS_ERR(segs))
-+ return PTR_ERR(segs);
-+
-+ skb->next = segs;
-+ DEV_GSO_CB(skb)->destructor = skb->destructor;
-+ skb->destructor = dev_gso_skb_destructor;
-+
-+ return 0;
-+}
-+
-+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-+ struct netdev_queue *txq)
-+{
-+ if (likely(!skb->next)) {
-+ if (!list_empty(&ptype_all))
-+ dev_queue_xmit_nit(skb, dev);
-+
-+ if (netif_needs_gso(dev, skb)) {
-+ if (unlikely(dev_gso_segment(skb)))
-+ goto out_kfree_skb;
-+ if (skb->next)
-+ goto gso;
-+ }
-+
-+ return dev->hard_start_xmit(skb, dev);
-+ }
-+
-+gso:
-+ do {
-+ struct sk_buff *nskb = skb->next;
-+ int rc;
-+
-+ skb->next = nskb->next;
-+ nskb->next = NULL;
-+ rc = dev->hard_start_xmit(nskb, dev);
-+ if (unlikely(rc)) {
-+ nskb->next = skb->next;
-+ skb->next = nskb;
-+ return rc;
-+ }
-+ if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
-+ return NETDEV_TX_BUSY;
-+ } while (skb->next);
-+
-+ skb->destructor = DEV_GSO_CB(skb)->destructor;
-+
-+out_kfree_skb:
-+ kfree_skb(skb);
-+ return 0;
-+}
-+
-+static u32 simple_tx_hashrnd;
-+static int simple_tx_hashrnd_initialized = 0;
-+
-+static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
-+{
-+ u32 addr1, addr2, ports;
-+ u32 hash, ihl;
-+ u8 ip_proto = 0;
-+
-+ if (unlikely(!simple_tx_hashrnd_initialized)) {
-+ get_random_bytes(&simple_tx_hashrnd, 4);
-+ simple_tx_hashrnd_initialized = 1;
-+ }
-+
-+ switch (skb->protocol) {
-+ case __constant_htons(ETH_P_IP):
-+ if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
-+ ip_proto = ip_hdr(skb)->protocol;
-+ addr1 = ip_hdr(skb)->saddr;
-+ addr2 = ip_hdr(skb)->daddr;
-+ ihl = ip_hdr(skb)->ihl;
-+ break;
-+ case __constant_htons(ETH_P_IPV6):
-+ ip_proto = ipv6_hdr(skb)->nexthdr;
-+ addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
-+ addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
-+ ihl = (40 >> 2);
-+ break;
-+ default:
-+ return 0;
-+ }
-+
-+
-+ switch (ip_proto) {
-+ case IPPROTO_TCP:
-+ case IPPROTO_UDP:
-+ case IPPROTO_DCCP:
-+ case IPPROTO_ESP:
-+ case IPPROTO_AH:
-+ case IPPROTO_SCTP:
-+ case IPPROTO_UDPLITE:
-+ ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
-+ break;
-+
-+ default:
-+ ports = 0;
-+ break;
-+ }
-+
-+ hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
-+
-+ return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
-+}
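The final line maps the 32-bit hash onto [0, real_num_tx_queues) by scaling rather than a modulo; a userspace sketch of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

/* (hash * nqueues) >> 32 keeps the result uniform over 0..nqueues-1
 * without a division, exactly as simple_tx_hash() does above. */
static uint16_t pick_queue(uint32_t hash, uint32_t nqueues)
{
	return (uint16_t)(((uint64_t)hash * nqueues) >> 32);
}

int main(void)
{
	printf("%u\n", pick_queue(0xdeadbeefu, 8));	/* a queue in 0..7 */
	return 0;
}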
-+
-+static struct netdev_queue *dev_pick_tx(struct net_device *dev,
-+ struct sk_buff *skb)
-+{
-+ u16 queue_index = 0;
-+
-+ if (dev->select_queue)
-+ queue_index = dev->select_queue(dev, skb);
-+ else if (dev->real_num_tx_queues > 1)
-+ queue_index = simple_tx_hash(dev, skb);
-+
-+ skb_set_queue_mapping(skb, queue_index);
-+ return netdev_get_tx_queue(dev, queue_index);
-+}
-+
-+/**
-+ * dev_queue_xmit - transmit a buffer
-+ * @skb: buffer to transmit
-+ *
-+ * Queue a buffer for transmission to a network device. The caller must
-+ * have set the device and priority and built the buffer before calling
-+ * this function. The function can be called from an interrupt.
-+ *
-+ * A negative errno code is returned on a failure. A success does not
-+ * guarantee the frame will be transmitted as it may be dropped due
-+ * to congestion or traffic shaping.
-+ *
-+ * -----------------------------------------------------------------------------------
-+ * I notice this method can also return errors from the queue disciplines,
-+ * including NET_XMIT_DROP, which is a positive value. So, errors can also
-+ * be positive.
-+ *
-+ * Regardless of the return value, the skb is consumed, so it is currently
-+ * difficult to retry a send to this method. (You can bump the ref count
-+ * before sending to hold a reference for retry if you are careful.)
-+ *
-+ * When calling this method, interrupts MUST be enabled. This is because
-+ * the BH enable code must have IRQs enabled so that it will not deadlock.
-+ * --BLG
-+ */
-+int dev_queue_xmit(struct sk_buff *skb)
-+{
-+ struct net_device *dev = skb->dev;
-+ struct netdev_queue *txq;
-+ struct Qdisc *q;
-+ int rc = -ENOMEM;
-+
-+ /* GSO will handle the following emulations directly. */
-+ if (netif_needs_gso(dev, skb))
-+ goto gso;
-+
-+ if (skb_shinfo(skb)->frag_list &&
-+ !(dev->features & NETIF_F_FRAGLIST) &&
-+ __skb_linearize(skb))
-+ goto out_kfree_skb;
-+
-+ /* Fragmented skb is linearized if device does not support SG,
-+ * or if at least one of the fragments is in highmem and the device
-+ * does not support DMA from it.
-+ */
-+ if (skb_shinfo(skb)->nr_frags &&
-+ (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
-+ __skb_linearize(skb))
-+ goto out_kfree_skb;
-+
-+ /* If packet is not checksummed and device does not support
-+ * checksumming for this protocol, complete checksumming here.
-+ */
-+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
-+ skb_set_transport_header(skb, skb->csum_start -
-+ skb_headroom(skb));
-+ if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
-+ goto out_kfree_skb;
-+ }
-+
-+gso:
-+ /* Disable soft irqs for various locks below. Also
-+ * stops preemption for RCU.
-+ */
-+ rcu_read_lock_bh();
-+
-+ txq = dev_pick_tx(dev, skb);
-+ q = rcu_dereference(txq->qdisc);
-+
-+#ifdef CONFIG_NET_CLS_ACT
-+ skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
-+#endif
-+ if (q->enqueue) {
-+ spinlock_t *root_lock = qdisc_lock(q);
-+
-+ spin_lock(root_lock);
-+
-+ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
-+ kfree_skb(skb);
-+ rc = NET_XMIT_DROP;
-+ } else {
-+ rc = qdisc_enqueue_root(skb, q);
-+ qdisc_run(q);
-+ }
-+ spin_unlock(root_lock);
-+
-+ goto out;
-+ }
-+
-+ /* The device has no queue. Common case for software devices:
-+ loopback, all sorts of tunnels...
-+
-+ Really, it is unlikely that netif_tx_lock protection is necessary
-+ here. (f.e. loopback and IP tunnels are clean ignoring statistics
-+ counters.)
-+ However, it is possible that they rely on the protection
-+ we provide here.
-+
-+ Check this and shoot the lock. It is not prone to deadlocks.
-+ Or shoot the noqueue qdisc; it is even simpler 8)
-+ */
-+ if (dev->flags & IFF_UP) {
-+ int cpu = smp_processor_id(); /* ok because BHs are off */
-+
-+ if (txq->xmit_lock_owner != cpu) {
-+
-+ HARD_TX_LOCK(dev, txq, cpu);
-+
-+ if (!netif_tx_queue_stopped(txq)) {
-+ rc = 0;
-+ if (!dev_hard_start_xmit(skb, dev, txq)) {
-+ HARD_TX_UNLOCK(dev, txq);
-+ goto out;
-+ }
-+ }
-+ HARD_TX_UNLOCK(dev, txq);
-+ if (net_ratelimit())
-+ printk(KERN_CRIT "Virtual device %s asks to "
-+ "queue packet!\n", dev->name);
-+ } else {
-+ /* Recursion is detected! It is possible,
-+ * unfortunately */
-+ if (net_ratelimit())
-+ printk(KERN_CRIT "Dead loop on virtual device "
-+ "%s, fix it urgently!\n", dev->name);
-+ }
-+ }
-+
-+ rc = -ENETDOWN;
-+ rcu_read_unlock_bh();
-+
-+out_kfree_skb:
-+ kfree_skb(skb);
-+ return rc;
-+out:
-+ rcu_read_unlock_bh();
-+ return rc;
-+}
-+
-+
-+/*=======================================================================
-+ Receiver routines
-+ =======================================================================*/
-+
-+int netdev_max_backlog __read_mostly = 1000;
-+int netdev_budget __read_mostly = 300;
-+int weight_p __read_mostly = 64; /* old backlog weight */
-+
-+DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
-+
-+
-+/**
-+ * netif_rx - post buffer to the network code
-+ * @skb: buffer to post
-+ *
-+ * This function receives a packet from a device driver and queues it for
-+ * the upper (protocol) levels to process. It always succeeds. The buffer
-+ * may be dropped during processing for congestion control or by the
-+ * protocol layers.
-+ *
-+ * return values:
-+ * NET_RX_SUCCESS (no congestion)
-+ * NET_RX_DROP (packet was dropped)
-+ *
-+ */
-+
-+int netif_rx(struct sk_buff *skb)
-+{
-+ struct softnet_data *queue;
-+ unsigned long flags;
-+
-+ /* if netpoll wants it, pretend we never saw it */
-+ if (netpoll_rx(skb))
-+ return NET_RX_DROP;
-+
-+ if (!skb->tstamp.tv64)
-+ net_timestamp(skb);
-+
-+ /*
-+ * The code is rearranged so that the path is shortest
-+ * when the CPU is congested but still operating.
-+ */
-+ local_irq_save(flags);
-+ queue = &__get_cpu_var(softnet_data);
-+
-+ __get_cpu_var(netdev_rx_stat).total++;
-+ if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
-+ if (queue->input_pkt_queue.qlen) {
-+enqueue:
-+ __skb_queue_tail(&queue->input_pkt_queue, skb);
-+ local_irq_restore(flags);
-+ return NET_RX_SUCCESS;
-+ }
-+
-+ napi_schedule(&queue->backlog);
-+ goto enqueue;
-+ }
-+
-+ __get_cpu_var(netdev_rx_stat).dropped++;
-+ local_irq_restore(flags);
-+
-+ kfree_skb(skb);
-+ return NET_RX_DROP;
-+}
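A sketch of the usual caller, a driver RX path (function name hypothetical):

/* Hypothetical driver fragment: hand a received Ethernet frame to the
 * stack. eth_type_trans() fills in skb->protocol and skb->pkt_type. */
static void example_rx(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	if (netif_rx(skb) == NET_RX_DROP)
		dev->stats.rx_dropped++;
}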
-+
-+int netif_rx_ni(struct sk_buff *skb)
-+{
-+ int err;
-+
-+ preempt_disable();
-+ err = netif_rx(skb);
-+ if (local_softirq_pending())
-+ do_softirq();
-+ preempt_enable();
-+
-+ return err;
-+}
-+
-+EXPORT_SYMBOL(netif_rx_ni);
-+
-+static void net_tx_action(struct softirq_action *h)
-+{
-+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
-+
-+ if (sd->completion_queue) {
-+ struct sk_buff *clist;
-+
-+ local_irq_disable();
-+ clist = sd->completion_queue;
-+ sd->completion_queue = NULL;
-+ local_irq_enable();
-+
-+ while (clist) {
-+ struct sk_buff *skb = clist;
-+ clist = clist->next;
-+
-+ WARN_ON(atomic_read(&skb->users));
-+ __kfree_skb(skb);
-+ }
-+ }
-+
-+ if (sd->output_queue) {
-+ struct Qdisc *head;
-+
-+ local_irq_disable();
-+ head = sd->output_queue;
-+ sd->output_queue = NULL;
-+ local_irq_enable();
-+
-+ while (head) {
-+ struct Qdisc *q = head;
-+ spinlock_t *root_lock;
-+
-+ head = head->next_sched;
-+
-+ root_lock = qdisc_lock(q);
-+ if (spin_trylock(root_lock)) {
-+ smp_mb__before_clear_bit();
-+ clear_bit(__QDISC_STATE_SCHED,
-+ &q->state);
-+ qdisc_run(q);
-+ spin_unlock(root_lock);
-+ } else {
-+ if (!test_bit(__QDISC_STATE_DEACTIVATED,
-+ &q->state)) {
-+ __netif_reschedule(q);
-+ } else {
-+ smp_mb__before_clear_bit();
-+ clear_bit(__QDISC_STATE_SCHED,
-+ &q->state);
-+ }
-+ }
-+ }
-+ }
-+}
-+
-+static inline int deliver_skb(struct sk_buff *skb,
-+ struct packet_type *pt_prev,
-+ struct net_device *orig_dev)
-+{
-+ atomic_inc(&skb->users);
-+ return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
-+}
-+
-+
-+/*
-+ * Click: Allow Click to ask to intercept input packets.
-+ */
-+int
-+register_net_in(struct notifier_block *nb)
-+{
-+ int err;
-+ rtnl_lock();
-+ err = raw_notifier_chain_register(&net_in_chain, nb);
-+ rtnl_unlock();
-+ return err;
-+}
-+
-+int
-+unregister_net_in(struct notifier_block *nb)
-+{
-+ int err;
-+ rtnl_lock();
-+ err = raw_notifier_chain_unregister(&net_in_chain, nb);
-+ rtnl_unlock();
-+ return err;
-+}
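A sketch of how Click (or any module) would hook into the chain above (names hypothetical; per the comment at net_in_chain, the handler may steal packets from net_rx_action):

static int example_net_in(struct notifier_block *nb,
			  unsigned long val, void *v)
{
	/* v carries the packet context; the exact convention for
	 * stealing a packet is defined by the Click glue, which is
	 * not shown in this hunk. */
	return NOTIFY_DONE;
}

static struct notifier_block example_net_in_nb = {
	.notifier_call = example_net_in,
};

/* register_net_in(&example_net_in_nb);
 * ...
 * unregister_net_in(&example_net_in_nb); */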
-+
-+
-+#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
-+/* These hooks defined here for ATM */
-+struct net_bridge;
-+struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
-+ unsigned char *addr);
-+void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
-+
-+/*
-+ * If bridge module is loaded call bridging hook.
-+ * returns NULL if packet was consumed.
-+ */
-+struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
-+ struct sk_buff *skb) __read_mostly;
-+static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
-+ struct packet_type **pt_prev, int *ret,
-+ struct net_device *orig_dev)
-+{
-+ struct net_bridge_port *port;
-+
-+ if (skb->pkt_type == PACKET_LOOPBACK ||
-+ (port = rcu_dereference(skb->dev->br_port)) == NULL)
-+ return skb;
-+
-+ if (*pt_prev) {
-+ *ret = deliver_skb(skb, *pt_prev, orig_dev);
-+ *pt_prev = NULL;
-+ }
-+
-+ return br_handle_frame_hook(port, skb);
-+}
-+#else
-+#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
-+#endif
-+
-+#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
-+struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
-+EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
-+
-+static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
-+ struct packet_type **pt_prev,
-+ int *ret,
-+ struct net_device *orig_dev)
-+{
-+ if (skb->dev->macvlan_port == NULL)
-+ return skb;
-+
-+ if (*pt_prev) {
-+ *ret = deliver_skb(skb, *pt_prev, orig_dev);
-+ *pt_prev = NULL;
-+ }
-+ return macvlan_handle_frame_hook(skb);
-+}
-+#else
-+#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
-+#endif
-+
-+#ifdef CONFIG_NET_CLS_ACT
-+/* TODO: Maybe we should just force sch_ingress to be compiled in
-+ * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
-+ * instructions (a compare and two stores) when sch_ingress is off
-+ * but CONFIG_NET_CLS_ACT is on.
-+ * NOTE: This doesn't remove any functionality; if you don't have
-+ * the ingress scheduler, you just can't add policies on ingress.
-+ */
-+static int ing_filter(struct sk_buff *skb)
-+{
-+ struct net_device *dev = skb->dev;
-+ u32 ttl = G_TC_RTTL(skb->tc_verd);
-+ struct netdev_queue *rxq;
-+ int result = TC_ACT_OK;
-+ struct Qdisc *q;
-+
-+ if (MAX_RED_LOOP < ttl++) {
-+ printk(KERN_WARNING
-+ "Redir loop detected Dropping packet (%d->%d)\n",
-+ skb->iif, dev->ifindex);
-+ return TC_ACT_SHOT;
-+ }
-+
-+ skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
-+ skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
-+
-+ rxq = &dev->rx_queue;
-+
-+ q = rxq->qdisc;
-+ if (q != &noop_qdisc) {
-+ spin_lock(qdisc_lock(q));
-+ if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
-+ result = qdisc_enqueue_root(skb, q);
-+ spin_unlock(qdisc_lock(q));
-+ }
-+
-+ return result;
-+}
-+
-+static inline struct sk_buff *handle_ing(struct sk_buff *skb,
-+ struct packet_type **pt_prev,
-+ int *ret, struct net_device *orig_dev)
-+{
-+ if (skb->dev->rx_queue.qdisc == &noop_qdisc)
-+ goto out;
-+
-+ if (*pt_prev) {
-+ *ret = deliver_skb(skb, *pt_prev, orig_dev);
-+ *pt_prev = NULL;
-+ } else {
-+ /* Huh? Why does turning on AF_PACKET affect this? */
-+ skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
-+ }
-+
-+ switch (ing_filter(skb)) {
-+ case TC_ACT_SHOT:
-+ case TC_ACT_STOLEN:
-+ kfree_skb(skb);
-+ return NULL;
-+ }
-+
-+out:
-+ skb->tc_verd = 0;
-+ return skb;
-+}
-+#endif
-+
-+/*
-+ * netif_nit_deliver - deliver received packets to network taps
-+ * @skb: buffer
-+ *
-+ * This function is used to deliver incoming packets to network
-+ * taps. It should be used when the normal netif_receive_skb path
-+ * is bypassed, for example because of VLAN acceleration.
-+ */
-+void netif_nit_deliver(struct sk_buff *skb)
-+{
-+ struct packet_type *ptype;
-+
-+ if (list_empty(&ptype_all))
-+ return;
-+
-+ skb_reset_network_header(skb);
-+ skb_reset_transport_header(skb);
-+ skb->mac_len = skb->network_header - skb->mac_header;
-+
-+ rcu_read_lock();
-+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
-+ if (!ptype->dev || ptype->dev == skb->dev)
-+ deliver_skb(skb, ptype, skb->dev);
-+ }
-+ rcu_read_unlock();
-+}
-+
-+/**
-+ * netif_receive_skb - process receive buffer from network
-+ * @skb: buffer to process
-+ *
-+ * netif_receive_skb() is the main receive data processing function.
-+ * It always succeeds. The buffer may be dropped during processing
-+ * for congestion control or by the protocol layers.
-+ *
-+ * This function may only be called from softirq context and interrupts
-+ * should be enabled.
-+ *
-+ * Return values (usually ignored):
-+ * NET_RX_SUCCESS: no congestion
-+ * NET_RX_DROP: packet was dropped
-+ */
-+/* Click: upstream prototype was int netif_receive_skb(struct sk_buff *skb) */
-+int __netif_receive_skb(struct sk_buff *skb, unsigned short type, int notifier_data)
-+{
-+ struct packet_type *ptype, *pt_prev;
-+ struct net_device *orig_dev;
-+ struct net_device *null_or_orig;
-+ int ret = NET_RX_DROP;
-+ /* Click: upstream declared "__be16 type" here; it is now a parameter */
-+
-+ /* if we've gotten here through NAPI, check netpoll */
-+ if (netpoll_receive_skb(skb))
-+ return NET_RX_DROP;
-+
-+ if (!skb->tstamp.tv64)
-+ net_timestamp(skb);
-+
-+ if (!skb->iif)
-+ skb->iif = skb->dev->ifindex;
-+
-+ null_or_orig = NULL;
-+ orig_dev = skb->dev;
-+ if (orig_dev->master) {
-+ if (skb_bond_should_drop(skb))
-+ null_or_orig = orig_dev; /* deliver only exact match */
-+ else
-+ skb->dev = orig_dev->master;
-+ }
-+
-+ __get_cpu_var(netdev_rx_stat).total++;
-+
-+ skb_reset_network_header(skb);
-+ skb_reset_transport_header(skb);
-+ skb->mac_len = skb->network_header - skb->mac_header;
-+
-+ /* Click: may want to steal the packet */
-+ if (notifier_data >= 0
-+ && raw_notifier_call_chain(&net_in_chain,
-+ notifier_data,
-+ skb) & NOTIFY_STOP_MASK) {
-+ return ret;
-+ }
-+
-+ pt_prev = NULL;
-+
-+ rcu_read_lock();
-+
-+ /* Don't receive packets in an exiting network namespace */
-+ if (!net_alive(dev_net(skb->dev)))
-+ goto out;
-+
-+#ifdef CONFIG_NET_CLS_ACT
-+ if (skb->tc_verd & TC_NCLS) {
-+ skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
-+ goto ncls;
-+ }
-+#endif
-+
-+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
-+ if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
-+ ptype->dev == orig_dev) {
-+ if (pt_prev)
-+ ret = deliver_skb(skb, pt_prev, orig_dev);
-+ pt_prev = ptype;
-+ }
-+ }
-+
-+#ifdef CONFIG_NET_CLS_ACT
-+ skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
-+ if (!skb)
-+ goto out;
-+ncls:
-+#endif
-+
-+ skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
-+ if (!skb)
-+ goto out;
-+ skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
-+ if (!skb)
-+ goto out;
-+
-+ type = skb->protocol;
-+ list_for_each_entry_rcu(ptype,
-+ &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
-+ if (ptype->type == type &&
-+ (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
-+ ptype->dev == orig_dev)) {
-+ if (pt_prev)
-+ ret = deliver_skb(skb, pt_prev, orig_dev);
-+ pt_prev = ptype;
-+ }
-+ }
-+
-+ if (pt_prev) {
-+ ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
-+ } else {
-+ kfree_skb(skb);
-+ /* Jamal, now you will not be able to escape explaining
-+ * to me how you were going to use this. :-)
-+ */
-+ ret = NET_RX_DROP;
-+ }
-+
-+out:
-+ rcu_read_unlock();
-+ return ret;
-+}
-+
-+/* Network device is going away, flush any packets still pending */
-+static void flush_backlog(void *arg)
-+{
-+ struct net_device *dev = arg;
-+ struct softnet_data *queue = &__get_cpu_var(softnet_data);
-+ struct sk_buff *skb, *tmp;
-+
-+ skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
-+ if (skb->dev == dev) {
-+ __skb_unlink(skb, &queue->input_pkt_queue);
-+ kfree_skb(skb);
-+ }
-+}
-+
-+static int process_backlog(struct napi_struct *napi, int quota)
-+{
-+ int work = 0;
-+ struct softnet_data *queue = &__get_cpu_var(softnet_data);
-+ unsigned long start_time = jiffies;
-+
-+ napi->weight = weight_p;
-+ do {
-+ struct sk_buff *skb;
-+
-+ local_irq_disable();
-+ skb = __skb_dequeue(&queue->input_pkt_queue);
-+ if (!skb) {
-+ __napi_complete(napi);
-+ local_irq_enable();
-+ break;
-+ }
-+ local_irq_enable();
-+
-+ __netif_receive_skb(skb, skb->protocol, skb_queue_len(&queue->input_pkt_queue));
-+ /* Click: upstream called netif_receive_skb(skb) here */
-+ } while (++work < quota && jiffies == start_time);
-+
-+ return work;
-+}
-+
-+/**
-+ * __napi_schedule - schedule for receive
-+ * @n: entry to schedule
-+ *
-+ * The entry's receive function will be scheduled to run
-+ */
-+void __napi_schedule(struct napi_struct *n)
-+{
-+ unsigned long flags;
-+
-+ local_irq_save(flags);
-+ list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
-+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
-+ local_irq_restore(flags);
-+}
-+EXPORT_SYMBOL(__napi_schedule);
-+
-+
-+static void net_rx_action(struct softirq_action *h)
-+{
-+ struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
-+ unsigned long start_time = jiffies;
-+ int budget = netdev_budget;
-+ void *have;
-+
-+ local_irq_disable();
-+
-+ while (!list_empty(list)) {
-+ struct napi_struct *n;
-+ int work, weight;
-+
-+ /* If the softirq window is exhausted then punt.
-+ *
-+ * Note that this is a slight policy change from the
-+ * previous NAPI code, which would allow up to 2
-+ * jiffies to pass before breaking out. The test
-+ * used to be "jiffies - start_time > 1".
-+ */
-+ if (unlikely(budget <= 0 || jiffies != start_time))
-+ goto softnet_break;
-+
-+ local_irq_enable();
-+
-+ /* Even though interrupts have been re-enabled, this
-+ * access is safe because interrupts can only add new
-+ * entries to the tail of this list, and only ->poll()
-+ * calls can remove this head entry from the list.
-+ */
-+ n = list_entry(list->next, struct napi_struct, poll_list);
-+
-+ have = netpoll_poll_lock(n);
-+
-+ weight = n->weight;
-+
-+ /* This NAPI_STATE_SCHED test is for avoiding a race
-+ * with netpoll's poll_napi(). Only the entity which
-+ * obtains the lock and sees NAPI_STATE_SCHED set will
-+ * actually make the ->poll() call. Therefore we avoid
-+ * accidentally calling ->poll() when NAPI is not scheduled.
-+ */
-+ work = 0;
-+ if (test_bit(NAPI_STATE_SCHED, &n->state))
-+ work = n->poll(n, weight);
-+
-+ WARN_ON_ONCE(work > weight);
-+
-+ budget -= work;
-+
-+ local_irq_disable();
-+
-+ /* Drivers must not modify the NAPI state if they
-+ * consume the entire weight. In such cases this code
-+ * still "owns" the NAPI instance and therefore can
-+ * move the instance around on the list at-will.
-+ */
-+ if (unlikely(work == weight)) {
-+ if (unlikely(napi_disable_pending(n)))
-+ __napi_complete(n);
-+ else
-+ list_move_tail(&n->poll_list, list);
-+ }
-+
-+ netpoll_poll_unlock(have);
-+ }
-+out:
-+ local_irq_enable();
-+
-+#ifdef CONFIG_NET_DMA
-+ /*
-+ * There may not be any more sk_buffs coming right now, so push
-+ * any pending DMA copies to hardware
-+ */
-+ if (!cpus_empty(net_dma.channel_mask)) {
-+ int chan_idx;
-+ for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
-+ struct dma_chan *chan = net_dma.channels[chan_idx];
-+ if (chan)
-+ dma_async_memcpy_issue_pending(chan);
-+ }
-+ }
-+#endif
-+
-+ return;
-+
-+softnet_break:
-+ __get_cpu_var(netdev_rx_stat).time_squeeze++;
-+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
-+ goto out;
-+}
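-+
-+#if 0 /* Illustrative sketch only -- the driver half of the NAPI contract
-+ * that net_rx_action() above enforces: process at most "budget"
-+ * packets, return the number processed, and complete NAPI only when
-+ * the RX ring is drained. The "my_*" helpers are hypothetical.
-+ */
-+static int my_poll(struct napi_struct *napi, int budget)
-+{
-+ struct my_priv *priv = container_of(napi, struct my_priv, napi);
-+ int work = 0;
-+
-+ while (work < budget && my_rx_pending(priv)) {
-+ my_rx_one(priv); /* ends in netif_receive_skb() */
-+ work++;
-+ }
-+
-+ if (work < budget) {
-+ netif_rx_complete(priv->dev, napi); /* clears NAPI_STATE_SCHED */
-+ my_enable_rx_irq(priv);
-+ }
-+ return work; /* must never exceed budget; see WARN_ON_ONCE() above */
-+}
-+#endif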
-+
-+static gifconf_func_t * gifconf_list [NPROTO];
-+
-+/**
-+ * register_gifconf - register a SIOCGIF handler
-+ * @family: Address family
-+ * @gifconf: Function handler
-+ *
-+ * Register protocol dependent address dumping routines. The handler
-+ * that is passed must not be freed or reused until it has been replaced
-+ * by another handler.
-+ */
-+int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
-+{
-+ if (family >= NPROTO)
-+ return -EINVAL;
-+ gifconf_list[family] = gifconf;
-+ return 0;
-+}
-+
-+
-+/*
-+ * Map an interface index to its name (SIOCGIFNAME)
-+ */
-+
-+/*
-+ * We need this ioctl for efficient implementation of the
-+ * if_indextoname() function required by the IPv6 API. Without
-+ * it, we would have to search all the interfaces to find a
-+ * match. --pb
-+ */
-+
-+static int dev_ifname(struct net *net, struct ifreq __user *arg)
-+{
-+ struct net_device *dev;
-+ struct ifreq ifr;
-+
-+ /*
-+ * Fetch the caller's info block.
-+ */
-+
-+ if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
-+ return -EFAULT;
-+
-+ read_lock(&dev_base_lock);
-+ dev = __dev_get_by_index(net, ifr.ifr_ifindex);
-+ if (!dev) {
-+ read_unlock(&dev_base_lock);
-+ return -ENODEV;
-+ }
-+
-+ strcpy(ifr.ifr_name, dev->name);
-+ read_unlock(&dev_base_lock);
-+
-+ if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
-+ return -EFAULT;
-+ return 0;
-+}
-+
-+/*
-+ * Perform a SIOCGIFCONF call. This structure will change
-+ * size eventually, and there is nothing I can do about it.
-+ * Thus we will need a 'compatibility mode'.
-+ */
-+
-+static int dev_ifconf(struct net *net, char __user *arg)
-+{
-+ struct ifconf ifc;
-+ struct net_device *dev;
-+ char __user *pos;
-+ int len;
-+ int total;
-+ int i;
-+
-+ /*
-+ * Fetch the caller's info block.
-+ */
-+
-+ if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
-+ return -EFAULT;
-+
-+ pos = ifc.ifc_buf;
-+ len = ifc.ifc_len;
-+
-+ /*
-+ * Loop over the interfaces, and write an info block for each.
-+ */
-+
-+ total = 0;
-+ for_each_netdev(net, dev) {
-+ if (!nx_dev_visible(current->nx_info, dev))
-+ continue;
-+ for (i = 0; i < NPROTO; i++) {
-+ if (gifconf_list[i]) {
-+ int done;
-+ if (!pos)
-+ done = gifconf_list[i](dev, NULL, 0);
-+ else
-+ done = gifconf_list[i](dev, pos + total,
-+ len - total);
-+ if (done < 0)
-+ return -EFAULT;
-+ total += done;
-+ }
-+ }
-+ }
-+
-+ /*
-+ * All done. Write the updated control block back to the caller.
-+ */
-+ ifc.ifc_len = total;
-+
-+ /*
-+ * Both BSD and Solaris return 0 here, so we do too.
-+ */
-+ return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
-+}
-+
-+#ifdef CONFIG_PROC_FS
-+/*
-+ * This is invoked by the /proc filesystem handler to display a device
-+ * in detail.
-+ */
-+void *dev_seq_start(struct seq_file *seq, loff_t *pos)
-+ __acquires(dev_base_lock)
-+{
-+ struct net *net = seq_file_net(seq);
-+ loff_t off;
-+ struct net_device *dev;
-+
-+ read_lock(&dev_base_lock);
-+ if (!*pos)
-+ return SEQ_START_TOKEN;
-+
-+ off = 1;
-+ for_each_netdev(net, dev)
-+ if (off++ == *pos)
-+ return dev;
-+
-+ return NULL;
-+}
-+
-+void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+ struct net *net = seq_file_net(seq);
-+ ++*pos;
-+ return v == SEQ_START_TOKEN ?
-+ first_net_device(net) : next_net_device((struct net_device *)v);
-+}
-+
-+void dev_seq_stop(struct seq_file *seq, void *v)
-+ __releases(dev_base_lock)
-+{
-+ read_unlock(&dev_base_lock);
-+}
-+
-+static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
-+{
-+ struct net_device_stats *stats = dev->get_stats(dev);
-+
-+ if (!nx_dev_visible(current->nx_info, dev))
-+ return;
-+
-+ seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
-+ "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
-+ dev->name, stats->rx_bytes, stats->rx_packets,
-+ stats->rx_errors,
-+ stats->rx_dropped + stats->rx_missed_errors,
-+ stats->rx_fifo_errors,
-+ stats->rx_length_errors + stats->rx_over_errors +
-+ stats->rx_crc_errors + stats->rx_frame_errors,
-+ stats->rx_compressed, stats->multicast,
-+ stats->tx_bytes, stats->tx_packets,
-+ stats->tx_errors, stats->tx_dropped,
-+ stats->tx_fifo_errors, stats->collisions,
-+ stats->tx_carrier_errors +
-+ stats->tx_aborted_errors +
-+ stats->tx_window_errors +
-+ stats->tx_heartbeat_errors,
-+ stats->tx_compressed);
-+}
-+
-+/*
-+ * Called from the PROCfs module. This now uses the new arbitrary sized
-+ * /proc/net interface to create /proc/net/dev
-+ */
-+static int dev_seq_show(struct seq_file *seq, void *v)
-+{
-+ if (v == SEQ_START_TOKEN)
-+ seq_puts(seq, "Inter-| Receive "
-+ " | Transmit\n"
-+ " face |bytes packets errs drop fifo frame "
-+ "compressed multicast|bytes packets errs "
-+ "drop fifo colls carrier compressed\n");
-+ else
-+ dev_seq_printf_stats(seq, v);
-+ return 0;
-+}
-+
-+static struct netif_rx_stats *softnet_get_online(loff_t *pos)
-+{
-+ struct netif_rx_stats *rc = NULL;
-+
-+ while (*pos < nr_cpu_ids)
-+ if (cpu_online(*pos)) {
-+ rc = &per_cpu(netdev_rx_stat, *pos);
-+ break;
-+ } else
-+ ++*pos;
-+ return rc;
-+}
-+
-+static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
-+{
-+ return softnet_get_online(pos);
-+}
-+
-+static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+ ++*pos;
-+ return softnet_get_online(pos);
-+}
-+
-+static void softnet_seq_stop(struct seq_file *seq, void *v)
-+{
-+}
-+
-+static int softnet_seq_show(struct seq_file *seq, void *v)
-+{
-+ struct netif_rx_stats *s = v;
-+
-+ seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
-+ s->total, s->dropped, s->time_squeeze, 0,
-+ 0, 0, 0, 0, /* was fastroute */
-+ s->cpu_collision );
-+ return 0;
-+}
-+
-+static const struct seq_operations dev_seq_ops = {
-+ .start = dev_seq_start,
-+ .next = dev_seq_next,
-+ .stop = dev_seq_stop,
-+ .show = dev_seq_show,
-+};
-+
-+static int dev_seq_open(struct inode *inode, struct file *file)
-+{
-+ return seq_open_net(inode, file, &dev_seq_ops,
-+ sizeof(struct seq_net_private));
-+}
-+
-+static const struct file_operations dev_seq_fops = {
-+ .owner = THIS_MODULE,
-+ .open = dev_seq_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release_net,
-+};
-+
-+static const struct seq_operations softnet_seq_ops = {
-+ .start = softnet_seq_start,
-+ .next = softnet_seq_next,
-+ .stop = softnet_seq_stop,
-+ .show = softnet_seq_show,
-+};
-+
-+static int softnet_seq_open(struct inode *inode, struct file *file)
-+{
-+ return seq_open(file, &softnet_seq_ops);
-+}
-+
-+static const struct file_operations softnet_seq_fops = {
-+ .owner = THIS_MODULE,
-+ .open = softnet_seq_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release,
-+};
-+
-+static void *ptype_get_idx(loff_t pos)
-+{
-+ struct packet_type *pt = NULL;
-+ loff_t i = 0;
-+ int t;
-+
-+ list_for_each_entry_rcu(pt, &ptype_all, list) {
-+ if (i == pos)
-+ return pt;
-+ ++i;
-+ }
-+
-+ for (t = 0; t < PTYPE_HASH_SIZE; t++) {
-+ list_for_each_entry_rcu(pt, &ptype_base[t], list) {
-+ if (i == pos)
-+ return pt;
-+ ++i;
-+ }
-+ }
-+ return NULL;
-+}
-+
-+static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
-+ __acquires(RCU)
-+{
-+ rcu_read_lock();
-+ return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
-+}
-+
-+static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-+{
-+ struct packet_type *pt;
-+ struct list_head *nxt;
-+ int hash;
-+
-+ ++*pos;
-+ if (v == SEQ_START_TOKEN)
-+ return ptype_get_idx(0);
-+
-+ pt = v;
-+ nxt = pt->list.next;
-+ if (pt->type == htons(ETH_P_ALL)) {
-+ if (nxt != &ptype_all)
-+ goto found;
-+ hash = 0;
-+ nxt = ptype_base[0].next;
-+ } else
-+ hash = ntohs(pt->type) & PTYPE_HASH_MASK;
-+
-+ while (nxt == &ptype_base[hash]) {
-+ if (++hash >= PTYPE_HASH_SIZE)
-+ return NULL;
-+ nxt = ptype_base[hash].next;
-+ }
-+found:
-+ return list_entry(nxt, struct packet_type, list);
-+}
-+
-+static void ptype_seq_stop(struct seq_file *seq, void *v)
-+ __releases(RCU)
-+{
-+ rcu_read_unlock();
-+}
-+
-+static void ptype_seq_decode(struct seq_file *seq, void *sym)
-+{
-+#ifdef CONFIG_KALLSYMS
-+ unsigned long offset = 0, symsize;
-+ const char *symname;
-+ char *modname;
-+ char namebuf[128];
-+
-+ symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
-+ &modname, namebuf);
-+
-+ if (symname) {
-+ char *delim = ":";
-+
-+ if (!modname)
-+ modname = delim = "";
-+ seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
-+ symname, offset);
-+ return;
-+ }
-+#endif
-+
-+ seq_printf(seq, "[%p]", sym);
-+}
-+
-+static int ptype_seq_show(struct seq_file *seq, void *v)
-+{
-+ struct packet_type *pt = v;
-+
-+ if (v == SEQ_START_TOKEN)
-+ seq_puts(seq, "Type Device Function\n");
-+ else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
-+ if (pt->type == htons(ETH_P_ALL))
-+ seq_puts(seq, "ALL ");
-+ else
-+ seq_printf(seq, "%04x", ntohs(pt->type));
-+
-+ seq_printf(seq, " %-8s ",
-+ pt->dev ? pt->dev->name : "");
-+ ptype_seq_decode(seq, pt->func);
-+ seq_putc(seq, '\n');
-+ }
-+
-+ return 0;
-+}
-+
-+static const struct seq_operations ptype_seq_ops = {
-+ .start = ptype_seq_start,
-+ .next = ptype_seq_next,
-+ .stop = ptype_seq_stop,
-+ .show = ptype_seq_show,
-+};
-+
-+static int ptype_seq_open(struct inode *inode, struct file *file)
-+{
-+ return seq_open_net(inode, file, &ptype_seq_ops,
-+ sizeof(struct seq_net_private));
-+}
-+
-+static const struct file_operations ptype_seq_fops = {
-+ .owner = THIS_MODULE,
-+ .open = ptype_seq_open,
-+ .read = seq_read,
-+ .llseek = seq_lseek,
-+ .release = seq_release_net,
-+};
-+
-+
-+static int __net_init dev_proc_net_init(struct net *net)
-+{
-+ int rc = -ENOMEM;
-+
-+ if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
-+ goto out;
-+ if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
-+ goto out_dev;
-+ if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
-+ goto out_softnet;
-+
-+ if (wext_proc_init(net))
-+ goto out_ptype;
-+ rc = 0;
-+out:
-+ return rc;
-+out_ptype:
-+ proc_net_remove(net, "ptype");
-+out_softnet:
-+ proc_net_remove(net, "softnet_stat");
-+out_dev:
-+ proc_net_remove(net, "dev");
-+ goto out;
-+}
-+
-+static void __net_exit dev_proc_net_exit(struct net *net)
-+{
-+ wext_proc_exit(net);
-+
-+ proc_net_remove(net, "ptype");
-+ proc_net_remove(net, "softnet_stat");
-+ proc_net_remove(net, "dev");
-+}
-+
-+static struct pernet_operations __net_initdata dev_proc_ops = {
-+ .init = dev_proc_net_init,
-+ .exit = dev_proc_net_exit,
-+};
-+
-+static int __init dev_proc_init(void)
-+{
-+ return register_pernet_subsys(&dev_proc_ops);
-+}
-+#else
-+#define dev_proc_init() 0
-+#endif /* CONFIG_PROC_FS */
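-+
-+#if 0 /* Illustrative sketch only -- the userspace view of the seq_file
-+ * plumbing above. Reads /proc/net/dev and prints per-interface
-+ * rx/tx byte counts (two header lines, then one line per device;
-+ * see dev_seq_printf_stats() for the column order).
-+ */
-+#include <stdio.h>
-+
-+int main(void)
-+{
-+ char line[512], name[64];
-+ unsigned long rx_bytes, tx_bytes;
-+ FILE *f = fopen("/proc/net/dev", "r");
-+
-+ if (!f)
-+ return 1;
-+ fgets(line, sizeof(line), f); /* skip the two header lines */
-+ fgets(line, sizeof(line), f);
-+ while (fgets(line, sizeof(line), f)) {
-+ /* rx_bytes is field 1 after the colon, tx_bytes field 9 */
-+ if (sscanf(line, " %63[^:]: %lu %*u %*u %*u %*u %*u %*u %*u %lu",
-+ name, &rx_bytes, &tx_bytes) == 3)
-+ printf("%s rx=%lu tx=%lu\n", name, rx_bytes, tx_bytes);
-+ }
-+ fclose(f);
-+ return 0;
-+}
-+#endif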
-+
-+
-+/**
-+ * netdev_set_master - set up master/slave pair
-+ * @slave: slave device
-+ * @master: new master device
-+ *
-+ * Changes the master device of the slave. Pass %NULL to break the
-+ * bonding. The caller must hold the RTNL semaphore. On a failure
-+ * a negative errno code is returned. On success the reference counts
-+ * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
-+ * function returns zero.
-+ */
-+int netdev_set_master(struct net_device *slave, struct net_device *master)
-+{
-+ struct net_device *old = slave->master;
-+
-+ ASSERT_RTNL();
-+
-+ if (master) {
-+ if (old)
-+ return -EBUSY;
-+ dev_hold(master);
-+ }
-+
-+ slave->master = master;
-+
-+ synchronize_net();
-+
-+ if (old)
-+ dev_put(old);
-+
-+ if (master)
-+ slave->flags |= IFF_SLAVE;
-+ else
-+ slave->flags &= ~IFF_SLAVE;
-+
-+ rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
-+ return 0;
-+}
-+
-+static void dev_change_rx_flags(struct net_device *dev, int flags)
-+{
-+ if (dev->flags & IFF_UP && dev->change_rx_flags)
-+ dev->change_rx_flags(dev, flags);
-+}
-+
-+static int __dev_set_promiscuity(struct net_device *dev, int inc)
-+{
-+ unsigned short old_flags = dev->flags;
-+
-+ ASSERT_RTNL();
-+
-+ dev->flags |= IFF_PROMISC;
-+ dev->promiscuity += inc;
-+ if (dev->promiscuity == 0) {
-+ /*
-+ * Avoid overflow.
-+ * If inc caused the counter to wrap, leave promiscuity
-+ * untouched and return an error.
-+ */
-+ if (inc < 0)
-+ dev->flags &= ~IFF_PROMISC;
-+ else {
-+ dev->promiscuity -= inc;
-+ printk(KERN_WARNING "%s: promiscuity touches roof, "
-+ "set promiscuity failed, promiscuity feature "
-+ "of device might be broken.\n", dev->name);
-+ return -EOVERFLOW;
-+ }
-+ }
-+ if (dev->flags != old_flags) {
-+ printk(KERN_INFO "device %s %s promiscuous mode\n",
-+ dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
-+ "left");
-+ if (audit_enabled)
-+ audit_log(current->audit_context, GFP_ATOMIC,
-+ AUDIT_ANOM_PROMISCUOUS,
-+ "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
-+ dev->name, (dev->flags & IFF_PROMISC),
-+ (old_flags & IFF_PROMISC),
-+ audit_get_loginuid(current),
-+ current->uid, current->gid,
-+ audit_get_sessionid(current));
-+
-+ dev_change_rx_flags(dev, IFF_PROMISC);
-+ }
-+ return 0;
-+}
-+
-+/**
-+ * dev_set_promiscuity - update promiscuity count on a device
-+ * @dev: device
-+ * @inc: modifier
-+ *
-+ * Add or remove promiscuity from a device. While the count in the device
-+ * remains above zero the interface remains promiscuous. Once it hits zero
-+ * the device reverts back to normal filtering operation. A negative inc
-+ * value is used to drop promiscuity on the device.
-+ * Return 0 if successful or a negative errno code on error.
-+ */
-+int dev_set_promiscuity(struct net_device *dev, int inc)
-+{
-+ unsigned short old_flags = dev->flags;
-+ int err;
-+
-+ err = __dev_set_promiscuity(dev, inc);
-+ if (err < 0)
-+ return err;
-+ if (dev->flags != old_flags)
-+ dev_set_rx_mode(dev);
-+ return err;
-+}
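-+
-+#if 0 /* Illustrative sketch only -- the counter semantics above mean every
-+ * dev_set_promiscuity(dev, 1) must eventually be balanced by a
-+ * dev_set_promiscuity(dev, -1); the interface leaves promiscuous
-+ * mode only when the count drops back to zero. "my_*" names are
-+ * hypothetical; both calls require the RTNL lock.
-+ */
-+static int my_start_sniffing(struct net_device *dev)
-+{
-+ int err;
-+
-+ rtnl_lock();
-+ err = dev_set_promiscuity(dev, 1); /* count 0 -> 1: enter promisc */
-+ rtnl_unlock();
-+ return err;
-+}
-+
-+static void my_stop_sniffing(struct net_device *dev)
-+{
-+ rtnl_lock();
-+ dev_set_promiscuity(dev, -1); /* count 1 -> 0: leave promisc */
-+ rtnl_unlock();
-+}
-+#endif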
-+
-+/**
-+ * dev_set_allmulti - update allmulti count on a device
-+ * @dev: device
-+ * @inc: modifier
-+ *
-+ * Add or remove reception of all multicast frames to a device. While the
-+ * count in the device remains above zero the interface keeps listening
-+ * to all multicast frames. Once it hits zero the device reverts back to
-+ * normal filtering operation. A negative @inc value is used to drop the
-+ * counter when releasing a resource that needed all multicasts.
-+ * Return 0 if successful or a negative errno code on error.
-+ */
-+
-+int dev_set_allmulti(struct net_device *dev, int inc)
-+{
-+ unsigned short old_flags = dev->flags;
-+
-+ ASSERT_RTNL();
-+
-+ dev->flags |= IFF_ALLMULTI;
-+ dev->allmulti += inc;
-+ if (dev->allmulti == 0) {
-+ /*
-+ * Avoid overflow.
-+ * If inc caused the counter to wrap, leave allmulti
-+ * untouched and return an error.
-+ */
-+ if (inc < 0)
-+ dev->flags &= ~IFF_ALLMULTI;
-+ else {
-+ dev->allmulti -= inc;
-+ printk(KERN_WARNING "%s: allmulti touches roof, "
-+ "set allmulti failed, allmulti feature of "
-+ "device might be broken.\n", dev->name);
-+ return -EOVERFLOW;
-+ }
-+ }
-+ if (dev->flags ^ old_flags) {
-+ dev_change_rx_flags(dev, IFF_ALLMULTI);
-+ dev_set_rx_mode(dev);
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Upload unicast and multicast address lists to device and
-+ * configure RX filtering. When the device doesn't support unicast
-+ * filtering it is put in promiscuous mode while unicast addresses
-+ * are present.
-+ */
-+void __dev_set_rx_mode(struct net_device *dev)
-+{
-+ /* dev_open will call this function so the list will stay sane. */
-+ if (!(dev->flags&IFF_UP))
-+ return;
-+
-+ if (!netif_device_present(dev))
-+ return;
-+
-+ if (dev->set_rx_mode)
-+ dev->set_rx_mode(dev);
-+ else {
-+ /* Unicast address changes may only happen under the rtnl,
-+ * therefore calling __dev_set_promiscuity here is safe.
-+ */
-+ if (dev->uc_count > 0 && !dev->uc_promisc) {
-+ __dev_set_promiscuity(dev, 1);
-+ dev->uc_promisc = 1;
-+ } else if (dev->uc_count == 0 && dev->uc_promisc) {
-+ __dev_set_promiscuity(dev, -1);
-+ dev->uc_promisc = 0;
-+ }
-+
-+ if (dev->set_multicast_list)
-+ dev->set_multicast_list(dev);
-+ }
-+}
-+
-+void dev_set_rx_mode(struct net_device *dev)
-+{
-+ netif_addr_lock_bh(dev);
-+ __dev_set_rx_mode(dev);
-+ netif_addr_unlock_bh(dev);
-+}
-+
-+int __dev_addr_delete(struct dev_addr_list **list, int *count,
-+ void *addr, int alen, int glbl)
-+{
-+ struct dev_addr_list *da;
-+
-+ for (; (da = *list) != NULL; list = &da->next) {
-+ if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
-+ alen == da->da_addrlen) {
-+ if (glbl) {
-+ int old_glbl = da->da_gusers;
-+ da->da_gusers = 0;
-+ if (old_glbl == 0)
-+ break;
-+ }
-+ if (--da->da_users)
-+ return 0;
-+
-+ *list = da->next;
-+ kfree(da);
-+ (*count)--;
-+ return 0;
-+ }
-+ }
-+ return -ENOENT;
-+}
-+
-+int __dev_addr_add(struct dev_addr_list **list, int *count,
-+ void *addr, int alen, int glbl)
-+{
-+ struct dev_addr_list *da;
-+
-+ for (da = *list; da != NULL; da = da->next) {
-+ if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
-+ da->da_addrlen == alen) {
-+ if (glbl) {
-+ int old_glbl = da->da_gusers;
-+ da->da_gusers = 1;
-+ if (old_glbl)
-+ return 0;
-+ }
-+ da->da_users++;
-+ return 0;
-+ }
-+ }
-+
-+ da = kzalloc(sizeof(*da), GFP_ATOMIC);
-+ if (da == NULL)
-+ return -ENOMEM;
-+ memcpy(da->da_addr, addr, alen);
-+ da->da_addrlen = alen;
-+ da->da_users = 1;
-+ da->da_gusers = glbl ? 1 : 0;
-+ da->next = *list;
-+ *list = da;
-+ (*count)++;
-+ return 0;
-+}
-+
-+/**
-+ * dev_unicast_delete - Release secondary unicast address.
-+ * @dev: device
-+ * @addr: address to delete
-+ * @alen: length of @addr
-+ *
-+ * Release reference to a secondary unicast address and remove it
-+ * from the device if the reference count drops to zero.
-+ *
-+ * The caller must hold the rtnl_mutex.
-+ */
-+int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
-+{
-+ int err;
-+
-+ ASSERT_RTNL();
-+
-+ netif_addr_lock_bh(dev);
-+ err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
-+ if (!err)
-+ __dev_set_rx_mode(dev);
-+ netif_addr_unlock_bh(dev);
-+ return err;
-+}
-+EXPORT_SYMBOL(dev_unicast_delete);
-+
-+/**
-+ * dev_unicast_add - add a secondary unicast address
-+ * @dev: device
-+ * @addr: address to add
-+ * @alen: length of @addr
-+ *
-+ * Add a secondary unicast address to the device or increase
-+ * the reference count if it already exists.
-+ *
-+ * The caller must hold the rtnl_mutex.
-+ */
-+int dev_unicast_add(struct net_device *dev, void *addr, int alen)
-+{
-+ int err;
-+
-+ ASSERT_RTNL();
-+
-+ netif_addr_lock_bh(dev);
-+ err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
-+ if (!err)
-+ __dev_set_rx_mode(dev);
-+ netif_addr_unlock_bh(dev);
-+ return err;
-+}
-+EXPORT_SYMBOL(dev_unicast_add);
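-+
-+#if 0 /* Illustrative sketch only -- secondary unicast addresses added via
-+ * dev_unicast_add() are refcounted: adding the same address twice
-+ * means deleting it twice. Both calls require the RTNL lock, and
-+ * __dev_set_rx_mode() falls back to promiscuous mode when the
-+ * hardware cannot filter unicast. "my_*" names are hypothetical.
-+ */
-+static int my_claim_extra_mac(struct net_device *dev, u8 *mac)
-+{
-+ int err;
-+
-+ rtnl_lock();
-+ err = dev_unicast_add(dev, mac, ETH_ALEN);
-+ rtnl_unlock();
-+ return err;
-+}
-+
-+static void my_release_extra_mac(struct net_device *dev, u8 *mac)
-+{
-+ rtnl_lock();
-+ dev_unicast_delete(dev, mac, ETH_ALEN);
-+ rtnl_unlock();
-+}
-+#endif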
-+
-+int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
-+ struct dev_addr_list **from, int *from_count)
-+{
-+ struct dev_addr_list *da, *next;
-+ int err = 0;
-+
-+ da = *from;
-+ while (da != NULL) {
-+ next = da->next;
-+ if (!da->da_synced) {
-+ err = __dev_addr_add(to, to_count,
-+ da->da_addr, da->da_addrlen, 0);
-+ if (err < 0)
-+ break;
-+ da->da_synced = 1;
-+ da->da_users++;
-+ } else if (da->da_users == 1) {
-+ __dev_addr_delete(to, to_count,
-+ da->da_addr, da->da_addrlen, 0);
-+ __dev_addr_delete(from, from_count,
-+ da->da_addr, da->da_addrlen, 0);
-+ }
-+ da = next;
-+ }
-+ return err;
-+}
-+
-+void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
-+ struct dev_addr_list **from, int *from_count)
-+{
-+ struct dev_addr_list *da, *next;
-+
-+ da = *from;
-+ while (da != NULL) {
-+ next = da->next;
-+ if (da->da_synced) {
-+ __dev_addr_delete(to, to_count,
-+ da->da_addr, da->da_addrlen, 0);
-+ da->da_synced = 0;
-+ __dev_addr_delete(from, from_count,
-+ da->da_addr, da->da_addrlen, 0);
-+ }
-+ da = next;
-+ }
-+}
-+
-+/**
-+ * dev_unicast_sync - Synchronize device's unicast list to another device
-+ * @to: destination device
-+ * @from: source device
-+ *
-+ * Add newly added addresses to the destination device and release
-+ * addresses that have no users left. The source device must be
-+ * locked by netif_tx_lock_bh.
-+ *
-+ * This function is intended to be called from the dev->set_rx_mode
-+ * function of layered software devices.
-+ */
-+int dev_unicast_sync(struct net_device *to, struct net_device *from)
-+{
-+ int err = 0;
-+
-+ netif_addr_lock_bh(to);
-+ err = __dev_addr_sync(&to->uc_list, &to->uc_count,
-+ &from->uc_list, &from->uc_count);
-+ if (!err)
-+ __dev_set_rx_mode(to);
-+ netif_addr_unlock_bh(to);
-+ return err;
-+}
-+EXPORT_SYMBOL(dev_unicast_sync);
-+
-+/**
-+ * dev_unicast_unsync - Remove synchronized addresses from the destination device
-+ * @to: destination device
-+ * @from: source device
-+ *
-+ * Remove all addresses that were added to the destination device by
-+ * dev_unicast_sync(). This function is intended to be called from the
-+ * dev->stop function of layered software devices.
-+ */
-+void dev_unicast_unsync(struct net_device *to, struct net_device *from)
-+{
-+ netif_addr_lock_bh(from);
-+ netif_addr_lock(to);
-+
-+ __dev_addr_unsync(&to->uc_list, &to->uc_count,
-+ &from->uc_list, &from->uc_count);
-+ __dev_set_rx_mode(to);
-+
-+ netif_addr_unlock(to);
-+ netif_addr_unlock_bh(from);
-+}
-+EXPORT_SYMBOL(dev_unicast_unsync);
-+
-+static void __dev_addr_discard(struct dev_addr_list **list)
-+{
-+ struct dev_addr_list *tmp;
-+
-+ while (*list != NULL) {
-+ tmp = *list;
-+ *list = tmp->next;
-+ if (tmp->da_users > tmp->da_gusers)
-+ printk("__dev_addr_discard: address leakage! "
-+ "da_users=%d\n", tmp->da_users);
-+ kfree(tmp);
-+ }
-+}
-+
-+static void dev_addr_discard(struct net_device *dev)
-+{
-+ netif_addr_lock_bh(dev);
-+
-+ __dev_addr_discard(&dev->uc_list);
-+ dev->uc_count = 0;
-+
-+ __dev_addr_discard(&dev->mc_list);
-+ dev->mc_count = 0;
-+
-+ netif_addr_unlock_bh(dev);
-+}
-+
-+unsigned dev_get_flags(const struct net_device *dev)
-+{
-+ unsigned flags;
-+
-+ flags = (dev->flags & ~(IFF_PROMISC |
-+ IFF_ALLMULTI |
-+ IFF_RUNNING |
-+ IFF_LOWER_UP |
-+ IFF_DORMANT)) |
-+ (dev->gflags & (IFF_PROMISC |
-+ IFF_ALLMULTI));
-+
-+ if (netif_running(dev)) {
-+ if (netif_oper_up(dev))
-+ flags |= IFF_RUNNING;
-+ if (netif_carrier_ok(dev))
-+ flags |= IFF_LOWER_UP;
-+ if (netif_dormant(dev))
-+ flags |= IFF_DORMANT;
-+ }
-+
-+ return flags;
-+}
-+
-+int dev_change_flags(struct net_device *dev, unsigned flags)
-+{
-+ int ret, changes;
-+ int old_flags = dev->flags;
-+
-+ ASSERT_RTNL();
-+
-+ /*
-+ * Set the flags on our device.
-+ */
-+
-+ dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
-+ IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
-+ IFF_AUTOMEDIA)) |
-+ (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
-+ IFF_ALLMULTI));
-+
-+ /*
-+ * Load in the correct multicast list now the flags have changed.
-+ */
-+
-+ if ((old_flags ^ flags) & IFF_MULTICAST)
-+ dev_change_rx_flags(dev, IFF_MULTICAST);
-+
-+ dev_set_rx_mode(dev);
-+
-+ /*
-+ * Has the user downed the interface? We handle IFF_UP ourselves
-+ * according to user attempts to set it, rather than blindly
-+ * setting it.
-+ */
-+
-+ ret = 0;
-+ if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
-+ ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
-+
-+ if (!ret)
-+ dev_set_rx_mode(dev);
-+ }
-+
-+ if (dev->flags & IFF_UP &&
-+ ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
-+ IFF_VOLATILE)))
-+ call_netdevice_notifiers(NETDEV_CHANGE, dev);
-+
-+ if ((flags ^ dev->gflags) & IFF_PROMISC) {
-+ int inc = (flags & IFF_PROMISC) ? +1 : -1;
-+ dev->gflags ^= IFF_PROMISC;
-+ dev_set_promiscuity(dev, inc);
-+ }
-+
-+ /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
-+ is important. Some (broken) drivers set IFF_PROMISC when
-+ IFF_ALLMULTI is requested, without asking us and without
-+ reporting it.
-+ */
-+ if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
-+ int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
-+ dev->gflags ^= IFF_ALLMULTI;
-+ dev_set_allmulti(dev, inc);
-+ }
-+
-+ /* Exclude state transition flags, already notified */
-+ changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
-+ if (changes)
-+ rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
-+
-+ return ret;
-+}
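-+
-+#if 0 /* Illustrative sketch only -- dev_change_flags() is the in-kernel
-+ * backend of SIOCSIFFLAGS (see dev_ifsioc() below). Bringing an
-+ * interface administratively up from kernel code, under RTNL:
-+ */
-+static int my_if_up(struct net_device *dev)
-+{
-+ int err;
-+
-+ rtnl_lock();
-+ err = dev_change_flags(dev, dev->flags | IFF_UP);
-+ rtnl_unlock();
-+ return err;
-+}
-+#endif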
-+
-+int dev_set_mtu(struct net_device *dev, int new_mtu)
-+{
-+ int err;
-+
-+ if (new_mtu == dev->mtu)
-+ return 0;
-+
-+ /* MTU must not be negative. */
-+ if (new_mtu < 0)
-+ return -EINVAL;
-+
-+ if (!netif_device_present(dev))
-+ return -ENODEV;
-+
-+ err = 0;
-+ if (dev->change_mtu)
-+ err = dev->change_mtu(dev, new_mtu);
-+ else
-+ dev->mtu = new_mtu;
-+ if (!err && dev->flags & IFF_UP)
-+ call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
-+ return err;
-+}
-+
-+int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
-+{
-+ int err;
-+
-+ if (!dev->set_mac_address)
-+ return -EOPNOTSUPP;
-+ if (sa->sa_family != dev->type)
-+ return -EINVAL;
-+ if (!netif_device_present(dev))
-+ return -ENODEV;
-+ err = dev->set_mac_address(dev, sa);
-+ if (!err)
-+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
-+ return err;
-+}
-+
-+/*
-+ * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
-+ */
-+static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
-+{
-+ int err;
-+ struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
-+
-+ if (!dev)
-+ return -ENODEV;
-+
-+ switch (cmd) {
-+ case SIOCGIFFLAGS: /* Get interface flags */
-+ ifr->ifr_flags = dev_get_flags(dev);
-+ return 0;
-+
-+ case SIOCGIFMETRIC: /* Get the metric on the interface
-+ (currently unused) */
-+ ifr->ifr_metric = 0;
-+ return 0;
-+
-+ case SIOCGIFMTU: /* Get the MTU of a device */
-+ ifr->ifr_mtu = dev->mtu;
-+ return 0;
-+
-+ case SIOCGIFHWADDR:
-+ if (!dev->addr_len)
-+ memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
-+ else
-+ memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
-+ min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
-+ ifr->ifr_hwaddr.sa_family = dev->type;
-+ return 0;
-+
-+ case SIOCGIFSLAVE:
-+ err = -EINVAL;
-+ break;
-+
-+ case SIOCGIFMAP:
-+ ifr->ifr_map.mem_start = dev->mem_start;
-+ ifr->ifr_map.mem_end = dev->mem_end;
-+ ifr->ifr_map.base_addr = dev->base_addr;
-+ ifr->ifr_map.irq = dev->irq;
-+ ifr->ifr_map.dma = dev->dma;
-+ ifr->ifr_map.port = dev->if_port;
-+ return 0;
-+
-+ case SIOCGIFINDEX:
-+ ifr->ifr_ifindex = dev->ifindex;
-+ return 0;
-+
-+ case SIOCGIFTXQLEN:
-+ ifr->ifr_qlen = dev->tx_queue_len;
-+ return 0;
-+
-+ default:
-+ /* dev_ioctl() should ensure this case
-+ * is never reached
-+ */
-+ WARN_ON(1);
-+ err = -EINVAL;
-+ break;
-+
-+ }
-+ return err;
-+}
-+
-+/*
-+ * Perform the SIOCxIFxxx calls, inside rtnl_lock()
-+ */
-+static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
-+{
-+ int err;
-+ struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
-+
-+ if (!dev)
-+ return -ENODEV;
-+
-+ switch (cmd) {
-+ case SIOCSIFFLAGS: /* Set interface flags */
-+ return dev_change_flags(dev, ifr->ifr_flags);
-+
-+ case SIOCSIFMETRIC: /* Set the metric on the interface
-+ (currently unused) */
-+ return -EOPNOTSUPP;
-+
-+ case SIOCSIFMTU: /* Set the MTU of a device */
-+ return dev_set_mtu(dev, ifr->ifr_mtu);
-+
-+ case SIOCSIFHWADDR:
-+ return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
-+
-+ case SIOCSIFHWBROADCAST:
-+ if (ifr->ifr_hwaddr.sa_family != dev->type)
-+ return -EINVAL;
-+ memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
-+ min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
-+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
-+ return 0;
-+
-+ case SIOCSIFMAP:
-+ if (dev->set_config) {
-+ if (!netif_device_present(dev))
-+ return -ENODEV;
-+ return dev->set_config(dev, &ifr->ifr_map);
-+ }
-+ return -EOPNOTSUPP;
-+
-+ case SIOCADDMULTI:
-+ if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
-+ ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
-+ return -EINVAL;
-+ if (!netif_device_present(dev))
-+ return -ENODEV;
-+ return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
-+ dev->addr_len, 1);
-+
-+ case SIOCDELMULTI:
-+ if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
-+ ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
-+ return -EINVAL;
-+ if (!netif_device_present(dev))
-+ return -ENODEV;
-+ return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
-+ dev->addr_len, 1);
-+
-+ case SIOCSIFTXQLEN:
-+ if (ifr->ifr_qlen < 0)
-+ return -EINVAL;
-+ dev->tx_queue_len = ifr->ifr_qlen;
-+ return 0;
-+
-+ case SIOCSIFNAME:
-+ ifr->ifr_newname[IFNAMSIZ-1] = '\0';
-+ return dev_change_name(dev, ifr->ifr_newname);
-+
-+ /*
-+ * Unknown or private ioctl
-+ */
-+
-+ default:
-+ if ((cmd >= SIOCDEVPRIVATE &&
-+ cmd <= SIOCDEVPRIVATE + 15) ||
-+ cmd == SIOCBONDENSLAVE ||
-+ cmd == SIOCBONDRELEASE ||
-+ cmd == SIOCBONDSETHWADDR ||
-+ cmd == SIOCBONDSLAVEINFOQUERY ||
-+ cmd == SIOCBONDINFOQUERY ||
-+ cmd == SIOCBONDCHANGEACTIVE ||
-+ cmd == SIOCGMIIPHY ||
-+ cmd == SIOCGMIIREG ||
-+ cmd == SIOCSMIIREG ||
-+ cmd == SIOCBRADDIF ||
-+ cmd == SIOCBRDELIF ||
-+ cmd == SIOCWANDEV) {
-+ err = -EOPNOTSUPP;
-+ if (dev->do_ioctl) {
-+ if (netif_device_present(dev))
-+ err = dev->do_ioctl(dev, ifr,
-+ cmd);
-+ else
-+ err = -ENODEV;
-+ }
-+ } else
-+ err = -EINVAL;
-+
-+ }
-+ return err;
-+}
-+
-+/*
-+ * This function handles all "interface"-type I/O control requests. The actual
-+ * 'doing' part of this is dev_ifsioc above.
-+ */
-+
-+/**
-+ * dev_ioctl - network device ioctl
-+ * @net: the applicable net namespace
-+ * @cmd: command to issue
-+ * @arg: pointer to a struct ifreq in user space
-+ *
-+ * Issue ioctl functions to devices. This is normally called by the
-+ * user space syscall interfaces but can sometimes be useful for
-+ * other purposes. The return value is the return from the syscall if
-+ * positive or a negative errno code on error.
-+ */
-+
-+int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
-+{
-+ struct ifreq ifr;
-+ int ret;
-+ char *colon;
-+
-+ /* One special case: SIOCGIFCONF takes ifconf argument
-+ and requires shared lock, because it sleeps writing
-+ to user space.
-+ */
-+
-+ if (cmd == SIOCGIFCONF) {
-+ rtnl_lock();
-+ ret = dev_ifconf(net, (char __user *) arg);
-+ rtnl_unlock();
-+ return ret;
-+ }
-+ if (cmd == SIOCGIFNAME)
-+ return dev_ifname(net, (struct ifreq __user *)arg);
-+
-+ if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
-+ return -EFAULT;
-+
-+ ifr.ifr_name[IFNAMSIZ-1] = 0;
-+
-+ colon = strchr(ifr.ifr_name, ':');
-+ if (colon)
-+ *colon = 0;
-+
-+ /*
-+ * See which interface the caller is talking about.
-+ */
-+
-+ switch (cmd) {
-+ /*
-+ * These ioctl calls:
-+ * - can be done by all.
-+ * - atomic and do not require locking.
-+ * - return a value
-+ */
-+ case SIOCGIFFLAGS:
-+ case SIOCGIFMETRIC:
-+ case SIOCGIFMTU:
-+ case SIOCGIFHWADDR:
-+ case SIOCGIFSLAVE:
-+ case SIOCGIFMAP:
-+ case SIOCGIFINDEX:
-+ case SIOCGIFTXQLEN:
-+ dev_load(net, ifr.ifr_name);
-+ read_lock(&dev_base_lock);
-+ ret = dev_ifsioc_locked(net, &ifr, cmd);
-+ read_unlock(&dev_base_lock);
-+ if (!ret) {
-+ if (colon)
-+ *colon = ':';
-+ if (copy_to_user(arg, &ifr,
-+ sizeof(struct ifreq)))
-+ ret = -EFAULT;
-+ }
-+ return ret;
-+
-+ case SIOCETHTOOL:
-+ dev_load(net, ifr.ifr_name);
-+ rtnl_lock();
-+ ret = dev_ethtool(net, &ifr);
-+ rtnl_unlock();
-+ if (!ret) {
-+ if (colon)
-+ *colon = ':';
-+ if (copy_to_user(arg, &ifr,
-+ sizeof(struct ifreq)))
-+ ret = -EFAULT;
-+ }
-+ return ret;
-+
-+ /*
-+ * These ioctl calls:
-+ * - require superuser power.
-+ * - require strict serialization.
-+ * - return a value
-+ */
-+ case SIOCGMIIPHY:
-+ case SIOCGMIIREG:
-+ case SIOCSIFNAME:
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+ dev_load(net, ifr.ifr_name);
-+ rtnl_lock();
-+ ret = dev_ifsioc(net, &ifr, cmd);
-+ rtnl_unlock();
-+ if (!ret) {
-+ if (colon)
-+ *colon = ':';
-+ if (copy_to_user(arg, &ifr,
-+ sizeof(struct ifreq)))
-+ ret = -EFAULT;
-+ }
-+ return ret;
-+
-+ /*
-+ * These ioctl calls:
-+ * - require superuser power.
-+ * - require strict serialization.
-+ * - do not return a value
-+ */
-+ case SIOCSIFFLAGS:
-+ case SIOCSIFMETRIC:
-+ case SIOCSIFMTU:
-+ case SIOCSIFMAP:
-+ case SIOCSIFHWADDR:
-+ case SIOCSIFSLAVE:
-+ case SIOCADDMULTI:
-+ case SIOCDELMULTI:
-+ case SIOCSIFHWBROADCAST:
-+ case SIOCSIFTXQLEN:
-+ case SIOCSMIIREG:
-+ case SIOCBONDENSLAVE:
-+ case SIOCBONDRELEASE:
-+ case SIOCBONDSETHWADDR:
-+ case SIOCBONDCHANGEACTIVE:
-+ case SIOCBRADDIF:
-+ case SIOCBRDELIF:
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+ /* fall through */
-+ case SIOCBONDSLAVEINFOQUERY:
-+ case SIOCBONDINFOQUERY:
-+ dev_load(net, ifr.ifr_name);
-+ rtnl_lock();
-+ ret = dev_ifsioc(net, &ifr, cmd);
-+ rtnl_unlock();
-+ return ret;
-+
-+ case SIOCGIFMEM:
-+ /* Get the per device memory space. We can add this but
-+ * currently do not support it */
-+ case SIOCSIFMEM:
-+ /* Set the per device memory buffer space.
-+ * Not applicable in our case */
-+ case SIOCSIFLINK:
-+ return -EINVAL;
-+
-+ /*
-+ * Unknown or private ioctl.
-+ */
-+ default:
-+ if (cmd == SIOCWANDEV ||
-+ (cmd >= SIOCDEVPRIVATE &&
-+ cmd <= SIOCDEVPRIVATE + 15)) {
-+ dev_load(net, ifr.ifr_name);
-+ rtnl_lock();
-+ ret = dev_ifsioc(net, &ifr, cmd);
-+ rtnl_unlock();
-+ if (!ret && copy_to_user(arg, &ifr,
-+ sizeof(struct ifreq)))
-+ ret = -EFAULT;
-+ return ret;
-+ }
-+ /* Take care of Wireless Extensions */
-+ if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
-+ return wext_handle_ioctl(net, &ifr, cmd, arg);
-+ return -EINVAL;
-+ }
-+}
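-+
-+#if 0 /* Illustrative sketch only -- the userspace side of the dispatch
-+ * above: SIOCGIFFLAGS travels through dev_ioctl() into
-+ * dev_ifsioc_locked(). Any socket fd works as the ioctl handle;
-+ * "eth0" is an assumed interface name.
-+ */
-+#include <stdio.h>
-+#include <string.h>
-+#include <unistd.h>
-+#include <sys/ioctl.h>
-+#include <sys/socket.h>
-+#include <net/if.h>
-+
-+int main(void)
-+{
-+ struct ifreq ifr;
-+ int fd = socket(AF_INET, SOCK_DGRAM, 0);
-+
-+ if (fd < 0)
-+ return 1;
-+ memset(&ifr, 0, sizeof(ifr));
-+ strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
-+ if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)
-+ printf("eth0 flags: %#x (up=%d)\n",
-+ ifr.ifr_flags, !!(ifr.ifr_flags & IFF_UP));
-+ close(fd);
-+ return 0;
-+}
-+#endif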
-+
-+
-+/**
-+ * dev_new_index - allocate an ifindex
-+ * @net: the applicable net namespace
-+ *
-+ * Returns a suitable unique value for a new device interface
-+ * number. The caller must hold the rtnl semaphore or the
-+ * dev_base_lock to be sure it remains unique.
-+ */
-+static int dev_new_index(struct net *net)
-+{
-+ static int ifindex;
-+ for (;;) {
-+ if (++ifindex <= 0)
-+ ifindex = 1;
-+ if (!__dev_get_by_index(net, ifindex))
-+ return ifindex;
-+ }
-+}
-+
-+/* Delayed registration/unregisteration */
-+static LIST_HEAD(net_todo_list);
-+
-+static void net_set_todo(struct net_device *dev)
-+{
-+ list_add_tail(&dev->todo_list, &net_todo_list);
-+}
-+
-+static void rollback_registered(struct net_device *dev)
-+{
-+ BUG_ON(dev_boot_phase);
-+ ASSERT_RTNL();
-+
-+ /* Some devices call this without having registered, to unwind a failed initialization. */
-+ if (dev->reg_state == NETREG_UNINITIALIZED) {
-+ printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
-+ "was registered\n", dev->name, dev);
-+
-+ WARN_ON(1);
-+ return;
-+ }
-+
-+ BUG_ON(dev->reg_state != NETREG_REGISTERED);
-+
-+ /* If device is running, close it first. */
-+ dev_close(dev);
-+
-+ /* And unlink it from device chain. */
-+ unlist_netdevice(dev);
-+
-+ dev->reg_state = NETREG_UNREGISTERING;
-+
-+ synchronize_net();
-+
-+ /* Shutdown queueing discipline. */
-+ dev_shutdown(dev);
-+
-+
-+ /* Notify protocols, that we are about to destroy
-+ this device. They should clean all the things.
-+ */
-+ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-+
-+ /*
-+ * Flush the unicast and multicast chains
-+ */
-+ dev_addr_discard(dev);
-+
-+ if (dev->uninit)
-+ dev->uninit(dev);
-+
-+ /* Notifier chain MUST detach us from master device. */
-+ WARN_ON(dev->master);
-+
-+ /* Remove entries from kobject tree */
-+ netdev_unregister_kobject(dev);
-+
-+ synchronize_net();
-+
-+ dev_put(dev);
-+}
-+
-+static void __netdev_init_queue_locks_one(struct net_device *dev,
-+ struct netdev_queue *dev_queue,
-+ void *_unused)
-+{
-+ spin_lock_init(&dev_queue->_xmit_lock);
-+ netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
-+ dev_queue->xmit_lock_owner = -1;
-+}
-+
-+static void netdev_init_queue_locks(struct net_device *dev)
-+{
-+ netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
-+ __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
-+}
-+
-+/**
-+ * register_netdevice - register a network device
-+ * @dev: device to register
-+ *
-+ * Take a completed network device structure and add it to the kernel
-+ * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
-+ * chain. 0 is returned on success. A negative errno code is returned
-+ * on a failure to set up the device, or if the name is a duplicate.
-+ *
-+ * Callers must hold the rtnl semaphore. You may want
-+ * register_netdev() instead of this.
-+ *
-+ * BUGS:
-+ * The locking appears insufficient to guarantee two parallel registers
-+ * will not get the same name.
-+ */
-+
-+int register_netdevice(struct net_device *dev)
-+{
-+ struct hlist_head *head;
-+ struct hlist_node *p;
-+ int ret;
-+ struct net *net;
-+
-+ BUG_ON(dev_boot_phase);
-+ ASSERT_RTNL();
-+
-+ might_sleep();
-+
-+ /* When net_device's are persistent, this will be fatal. */
-+ BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
-+ BUG_ON(!dev_net(dev));
-+ net = dev_net(dev);
-+
-+ spin_lock_init(&dev->addr_list_lock);
-+ netdev_set_addr_lockdep_class(dev);
-+ netdev_init_queue_locks(dev);
-+
-+ dev->iflink = -1;
-+
-+ /* Init, if this function is available */
-+ if (dev->init) {
-+ ret = dev->init(dev);
-+ if (ret) {
-+ if (ret > 0)
-+ ret = -EIO;
-+ goto out;
-+ }
-+ }
-+
-+ if (!dev_valid_name(dev->name)) {
-+ ret = -EINVAL;
-+ goto err_uninit;
-+ }
-+
-+ dev->ifindex = dev_new_index(net);
-+ if (dev->iflink == -1)
-+ dev->iflink = dev->ifindex;
-+
-+ /* Check for existence of name */
-+ head = dev_name_hash(net, dev->name);
-+ hlist_for_each(p, head) {
-+ struct net_device *d
-+ = hlist_entry(p, struct net_device, name_hlist);
-+ if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
-+ ret = -EEXIST;
-+ goto err_uninit;
-+ }
-+ }
-+
-+ /* Fix illegal checksum combinations */
-+ if ((dev->features & NETIF_F_HW_CSUM) &&
-+ (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-+ printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
-+ dev->name);
-+ dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
-+ }
-+
-+ if ((dev->features & NETIF_F_NO_CSUM) &&
-+ (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-+ printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
-+ dev->name);
-+ dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
-+ }
-+
-+
-+ /* Fix illegal SG+CSUM combinations. */
-+ if ((dev->features & NETIF_F_SG) &&
-+ !(dev->features & NETIF_F_ALL_CSUM)) {
-+ printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
-+ dev->name);
-+ dev->features &= ~NETIF_F_SG;
-+ }
-+
-+ /* TSO requires that SG is present as well. */
-+ if ((dev->features & NETIF_F_TSO) &&
-+ !(dev->features & NETIF_F_SG)) {
-+ printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
-+ dev->name);
-+ dev->features &= ~NETIF_F_TSO;
-+ }
-+ if (dev->features & NETIF_F_UFO) {
-+ if (!(dev->features & NETIF_F_GEN_CSUM)) {
-+ printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
-+ "NETIF_F_HW_CSUM feature.\n",
-+ dev->name);
-+ dev->features &= ~NETIF_F_UFO;
-+ }
-+ if (!(dev->features & NETIF_F_SG)) {
-+ printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
-+ "NETIF_F_SG feature.\n",
-+ dev->name);
-+ dev->features &= ~NETIF_F_UFO;
-+ }
-+ }
-+
-+ /* Enable software GSO if SG is supported. */
-+ if (dev->features & NETIF_F_SG)
-+ dev->features |= NETIF_F_GSO;
-+
-+ netdev_initialize_kobject(dev);
-+ ret = netdev_register_kobject(dev);
-+ if (ret)
-+ goto err_uninit;
-+ dev->reg_state = NETREG_REGISTERED;
-+
-+ /*
-+ * The default initial state at registration is that the
-+ * device is present.
-+ */
-+
-+ set_bit(__LINK_STATE_PRESENT, &dev->state);
-+
-+ dev_init_scheduler(dev);
-+ dev_hold(dev);
-+ list_netdevice(dev);
-+
-+ /* Notify protocols, that a new device appeared. */
-+ ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
-+ ret = notifier_to_errno(ret);
-+ if (ret) {
-+ rollback_registered(dev);
-+ dev->reg_state = NETREG_UNREGISTERED;
-+ }
-+
-+out:
-+ return ret;
-+
-+err_uninit:
-+ if (dev->uninit)
-+ dev->uninit(dev);
-+ goto out;
-+}
-+
-+/**
-+ * register_netdev - register a network device
-+ * @dev: device to register
-+ *
-+ * Take a completed network device structure and add it to the kernel
-+ * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
-+ * chain. 0 is returned on success. A negative errno code is returned
-+ * on a failure to set up the device, or if the name is a duplicate.
-+ *
-+ * This is a wrapper around register_netdevice that takes the rtnl semaphore
-+ * and expands the device name if you passed a format string to
-+ * alloc_netdev.
-+ */
-+int register_netdev(struct net_device *dev)
-+{
-+ int err;
-+
-+ rtnl_lock();
-+
-+ /*
-+ * If the name is a format string the caller wants us to do a
-+ * name allocation.
-+ */
-+ if (strchr(dev->name, '%')) {
-+ err = dev_alloc_name(dev, dev->name);
-+ if (err < 0)
-+ goto out;
-+ }
-+
-+ err = register_netdevice(dev);
-+out:
-+ rtnl_unlock();
-+ return err;
-+}
-+EXPORT_SYMBOL(register_netdev);
-+
-+/*
-+ * netdev_wait_allrefs - wait until all references are gone.
-+ *
-+ * This is called when unregistering network devices.
-+ *
-+ * Any protocol or device that holds a reference should register
-+ * for netdevice notification, and cleanup and put back the
-+ * reference if they receive an UNREGISTER event.
-+ * We can get stuck here if buggy protocols don't correctly
-+ * call dev_put.
-+ */
-+static void netdev_wait_allrefs(struct net_device *dev)
-+{
-+ unsigned long rebroadcast_time, warning_time;
-+
-+ rebroadcast_time = warning_time = jiffies;
-+ while (atomic_read(&dev->refcnt) != 0) {
-+ if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
-+ rtnl_lock();
-+
-+ /* Rebroadcast unregister notification */
-+ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-+
-+ if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
-+ &dev->state)) {
-+ /* We must not have linkwatch events
-+ * pending on unregister. If this
-+ * happens, we simply run the queue
-+ * unscheduled, resulting in a noop
-+ * for this device.
-+ */
-+ linkwatch_run_queue();
-+ }
-+
-+ __rtnl_unlock();
-+
-+ rebroadcast_time = jiffies;
-+ }
-+
-+ msleep(250);
-+
-+ if (time_after(jiffies, warning_time + 10 * HZ)) {
-+ printk(KERN_EMERG "unregister_netdevice: "
-+ "waiting for %s to become free. Usage "
-+ "count = %d\n",
-+ dev->name, atomic_read(&dev->refcnt));
-+ warning_time = jiffies;
-+ }
-+ }
-+}
-+
-+/* The sequence is:
-+ *
-+ * rtnl_lock();
-+ * ...
-+ * register_netdevice(x1);
-+ * register_netdevice(x2);
-+ * ...
-+ * unregister_netdevice(y1);
-+ * unregister_netdevice(y2);
-+ * ...
-+ * rtnl_unlock();
-+ * free_netdev(y1);
-+ * free_netdev(y2);
-+ *
-+ * We are invoked by rtnl_unlock().
-+ * This allows us to deal with problems:
-+ * 1) We can delete sysfs objects which invoke hotplug
-+ * without deadlocking with linkwatch via keventd.
-+ * 2) Since we run with the RTNL semaphore not held, we can sleep
-+ * safely in order to wait for the netdev refcnt to drop to zero.
-+ *
-+ * We must not return until all unregister events added during
-+ * the interval the lock was held have been completed.
-+ */
-+void netdev_run_todo(void)
-+{
-+ struct list_head list;
-+
-+ /* Snapshot list, allow later requests */
-+ list_replace_init(&net_todo_list, &list);
-+
-+ __rtnl_unlock();
-+
-+ while (!list_empty(&list)) {
-+ struct net_device *dev
-+ = list_entry(list.next, struct net_device, todo_list);
-+ list_del(&dev->todo_list);
-+
-+ if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
-+ printk(KERN_ERR "network todo '%s' but state %d\n",
-+ dev->name, dev->reg_state);
-+ dump_stack();
-+ continue;
-+ }
-+
-+ dev->reg_state = NETREG_UNREGISTERED;
-+
-+ on_each_cpu(flush_backlog, dev, 1);
-+
-+ netdev_wait_allrefs(dev);
-+
-+ /* paranoia */
-+ BUG_ON(atomic_read(&dev->refcnt));
-+ WARN_ON(dev->ip_ptr);
-+ WARN_ON(dev->ip6_ptr);
-+ WARN_ON(dev->dn_ptr);
-+
-+ if (dev->destructor)
-+ dev->destructor(dev);
-+
-+ /* Free network device */
-+ kobject_put(&dev->dev.kobj);
-+ }
-+}
-+
-+static struct net_device_stats *internal_stats(struct net_device *dev)
-+{
-+ return &dev->stats;
-+}
-+
-+static void netdev_init_one_queue(struct net_device *dev,
-+ struct netdev_queue *queue,
-+ void *_unused)
-+{
-+ queue->dev = dev;
-+}
-+
-+static void netdev_init_queues(struct net_device *dev)
-+{
-+ netdev_init_one_queue(dev, &dev->rx_queue, NULL);
-+ netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
-+ spin_lock_init(&dev->tx_global_lock);
-+}
-+
-+/**
-+ * alloc_netdev_mq - allocate network device
-+ * @sizeof_priv: size of private data to allocate space for
-+ * @name: device name format string
-+ * @setup: callback to initialize device
-+ * @queue_count: the number of subqueues to allocate
-+ *
-+ * Allocates a struct net_device with private data area for driver use
-+ * and performs basic initialization. Also allocates subqueue structs
-+ * for each queue on the device at the end of the netdevice.
-+ */
-+struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
-+ void (*setup)(struct net_device *), unsigned int queue_count)
-+{
-+ struct netdev_queue *tx;
-+ struct net_device *dev;
-+ size_t alloc_size;
-+ void *p;
-+
-+ BUG_ON(strlen(name) >= sizeof(dev->name));
-+
-+ alloc_size = sizeof(struct net_device);
-+ if (sizeof_priv) {
-+ /* ensure 32-byte alignment of private area */
-+ alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
-+ alloc_size += sizeof_priv;
-+ }
-+ /* ensure 32-byte alignment of whole construct */
-+ alloc_size += NETDEV_ALIGN_CONST;
-+
-+ p = kzalloc(alloc_size, GFP_KERNEL);
-+ if (!p) {
-+ printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
-+ return NULL;
-+ }
-+
-+ tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
-+ if (!tx) {
-+ printk(KERN_ERR "alloc_netdev: Unable to allocate "
-+ "tx qdiscs.\n");
-+ kfree(p);
-+ return NULL;
-+ }
-+
-+ dev = (struct net_device *)
-+ (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
-+ dev->padded = (char *)dev - (char *)p;
-+ dev_net_set(dev, &init_net);
-+
-+ dev->_tx = tx;
-+ dev->num_tx_queues = queue_count;
-+ dev->real_num_tx_queues = queue_count;
-+
-+ if (sizeof_priv) {
-+ dev->priv = ((char *)dev +
-+ ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
-+ & ~NETDEV_ALIGN_CONST));
-+ }
-+
-+ dev->gso_max_size = GSO_MAX_SIZE;
-+
-+ netdev_init_queues(dev);
-+
-+ dev->get_stats = internal_stats;
-+ netpoll_netdev_init(dev);
-+ setup(dev);
-+ strcpy(dev->name, name);
-+ return dev;
-+}
-+EXPORT_SYMBOL(alloc_netdev_mq);
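-+
-+#if 0 /* Illustrative sketch only -- the usual driver life cycle around the
-+ * allocator above. "click%d" is a hypothetical name template; the
-+ * '%d' is expanded by dev_alloc_name() inside register_netdev(),
-+ * and ether_setup() fills in the Ethernet defaults.
-+ */
-+struct my_priv { int dummy; };
-+
-+static struct net_device *my_create(void)
-+{
-+ struct net_device *dev;
-+
-+ dev = alloc_netdev_mq(sizeof(struct my_priv), "click%d",
-+ ether_setup, 1);
-+ if (!dev)
-+ return NULL;
-+ if (register_netdev(dev) < 0) {
-+ free_netdev(dev); /* copes with both failure states */
-+ return NULL;
-+ }
-+ return dev;
-+}
-+
-+static void my_destroy(struct net_device *dev)
-+{
-+ unregister_netdev(dev); /* refcnt drained via netdev_run_todo() */
-+ free_netdev(dev);
-+}
-+#endif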
-+
-+/**
-+ * free_netdev - free network device
-+ * @dev: device
-+ *
-+ * This function does the last stage of destroying an allocated device
-+ * interface. The reference to the device object is released.
-+ * If this is the last reference then it will be freed.
-+ */
-+void free_netdev(struct net_device *dev)
-+{
-+ release_net(dev_net(dev));
-+
-+ kfree(dev->_tx);
-+
-+ /* Compatibility with error handling in drivers */
-+ if (dev->reg_state == NETREG_UNINITIALIZED) {
-+ kfree((char *)dev - dev->padded);
-+ return;
-+ }
-+
-+ BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
-+ dev->reg_state = NETREG_RELEASED;
-+
-+ /* will free via device release */
-+ put_device(&dev->dev);
-+}
-+
-+/* Synchronize with packet receive processing. */
-+void synchronize_net(void)
-+{
-+ might_sleep();
-+ synchronize_rcu();
-+}
-+
-+/**
-+ * unregister_netdevice - remove device from the kernel
-+ * @dev: device
-+ *
-+ * This function shuts down a device interface and removes it
-+ * from the kernel tables.
-+ *
-+ * Callers must hold the rtnl semaphore. You may want
-+ * unregister_netdev() instead of this.
-+ */
-+
-+void unregister_netdevice(struct net_device *dev)
-+{
-+ ASSERT_RTNL();
-+
-+ rollback_registered(dev);
-+ /* Finish processing unregister after unlock */
-+ net_set_todo(dev);
-+}
-+
-+/**
-+ * unregister_netdev - remove device from the kernel
-+ * @dev: device
-+ *
-+ * This function shuts down a device interface and removes it
-+ * from the kernel tables.
-+ *
-+ * This is just a wrapper for unregister_netdevice that takes
-+ * the rtnl semaphore. In general you want to use this and not
-+ * unregister_netdevice.
-+ */
-+void unregister_netdev(struct net_device *dev)
-+{
-+ rtnl_lock();
-+ unregister_netdevice(dev);
-+ rtnl_unlock();
-+}
-+
-+EXPORT_SYMBOL(unregister_netdev);
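-+
-+/*
-+ * Example (illustrative): the typical module unload sequence; the
-+ * device memory itself is released by free_netdev() afterwards:
-+ *
-+ *	unregister_netdev(dev);
-+ *	free_netdev(dev);
-+ */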
-+
-+/**
-+ * dev_change_net_namespace - move device to a different network namespace
-+ * @dev: device
-+ * @net: network namespace
-+ * @pat: If not NULL, name pattern to try if the current device name
-+ * is already taken in the destination network namespace.
-+ *
-+ * This function shuts down a device interface and moves it
-+ * to a new network namespace. On success 0 is returned; on
-+ * failure a negative errno code is returned.
-+ *
-+ * Callers must hold the rtnl semaphore.
-+ */
-+
-+int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
-+{
-+ char buf[IFNAMSIZ];
-+ const char *destname;
-+ int err;
-+
-+ ASSERT_RTNL();
-+
-+ /* Don't allow namespace local devices to be moved. */
-+ err = -EINVAL;
-+ if (dev->features & NETIF_F_NETNS_LOCAL)
-+ goto out;
-+
-+#ifdef CONFIG_SYSFS
-+ /* Don't allow real devices to be moved when sysfs
-+ * is enabled.
-+ */
-+ err = -EINVAL;
-+ if (dev->dev.parent)
-+ goto out;
-+#endif
-+
-+ /* Ensure the device has been registered */
-+ err = -EINVAL;
-+ if (dev->reg_state != NETREG_REGISTERED)
-+ goto out;
-+
-+ /* Get out if there is nothing to do */
-+ err = 0;
-+ if (net_eq(dev_net(dev), net))
-+ goto out;
-+
-+ /* Pick the destination device name, and ensure
-+ * we can use it in the destination network namespace.
-+ */
-+ err = -EEXIST;
-+ destname = dev->name;
-+ if (__dev_get_by_name(net, destname)) {
-+ /* We get here if we can't use the current device name */
-+ if (!pat)
-+ goto out;
-+ if (!dev_valid_name(pat))
-+ goto out;
-+ if (strchr(pat, '%')) {
-+ if (__dev_alloc_name(net, pat, buf) < 0)
-+ goto out;
-+ destname = buf;
-+ } else
-+ destname = pat;
-+ if (__dev_get_by_name(net, destname))
-+ goto out;
-+ }
-+
-+ /*
-+ * And now a mini version of unregister_netdevice() and register_netdevice().
-+ */
-+
-+ /* If device is running close it first. */
-+ dev_close(dev);
-+
-+ /* And unlink it from device chain */
-+ err = -ENODEV;
-+ unlist_netdevice(dev);
-+
-+ synchronize_net();
-+
-+ /* Shutdown queueing discipline. */
-+ dev_shutdown(dev);
-+
-+ /* Notify protocols that we are about to destroy
-+ this device. They should clean up all their state.
-+ */
-+ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
-+
-+ /*
-+ * Flush the unicast and multicast chains
-+ */
-+ dev_addr_discard(dev);
-+
-+ netdev_unregister_kobject(dev);
-+
-+ /* Actually switch the network namespace */
-+ dev_net_set(dev, net);
-+
-+ /* Assign the new device name */
-+ if (destname != dev->name)
-+ strcpy(dev->name, destname);
-+
-+ /* If there is an ifindex conflict assign a new one */
-+ if (__dev_get_by_index(net, dev->ifindex)) {
-+ int iflink = (dev->iflink == dev->ifindex);
-+ dev->ifindex = dev_new_index(net);
-+ if (iflink)
-+ dev->iflink = dev->ifindex;
-+ }
-+
-+ /* Fixup kobjects */
-+ err = netdev_register_kobject(dev);
-+ WARN_ON(err);
-+
-+ /* Add the device back in the hashes */
-+ list_netdevice(dev);
-+
-+ /* Notify protocols that a new device appeared. */
-+ call_netdevice_notifiers(NETDEV_REGISTER, dev);
-+
-+ synchronize_net();
-+ err = 0;
-+out:
-+ return err;
-+}
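-+
-+/*
-+ * Example (illustrative): move a device into another namespace,
-+ * falling back to a name pattern if its current name is already
-+ * taken there (callers must hold the rtnl semaphore):
-+ *
-+ *	rtnl_lock();
-+ *	err = dev_change_net_namespace(dev, net, "eth%d");
-+ *	rtnl_unlock();
-+ */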
-+
-+static int dev_cpu_callback(struct notifier_block *nfb,
-+ unsigned long action,
-+ void *ocpu)
-+{
-+ struct sk_buff **list_skb;
-+ struct Qdisc **list_net;
-+ struct sk_buff *skb;
-+ unsigned int cpu, oldcpu = (unsigned long)ocpu;
-+ struct softnet_data *sd, *oldsd;
-+
-+ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
-+ return NOTIFY_OK;
-+
-+ local_irq_disable();
-+ cpu = smp_processor_id();
-+ sd = &per_cpu(softnet_data, cpu);
-+ oldsd = &per_cpu(softnet_data, oldcpu);
-+
-+ /* Find end of our completion_queue. */
-+ list_skb = &sd->completion_queue;
-+ while (*list_skb)
-+ list_skb = &(*list_skb)->next;
-+ /* Append completion queue from offline CPU. */
-+ *list_skb = oldsd->completion_queue;
-+ oldsd->completion_queue = NULL;
-+
-+ /* Find end of our output_queue. */
-+ list_net = &sd->output_queue;
-+ while (*list_net)
-+ list_net = &(*list_net)->next_sched;
-+ /* Append output queue from offline CPU. */
-+ *list_net = oldsd->output_queue;
-+ oldsd->output_queue = NULL;
-+
-+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
-+ local_irq_enable();
-+
-+ /* Process offline CPU's input_pkt_queue */
-+ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
-+ netif_rx(skb);
-+
-+ return NOTIFY_OK;
-+}
-+
-+#ifdef CONFIG_NET_DMA
-+/**
-+ * net_dma_rebalance - try to maintain one DMA channel per CPU
-+ * @net_dma: DMA client and associated data (lock, channels, channel_mask)
-+ *
-+ * This is called when the number of channels allocated to the net_dma client
-+ * changes. The net_dma client tries to have one DMA channel per CPU.
-+ */
-+
-+static void net_dma_rebalance(struct net_dma *net_dma)
-+{
-+ unsigned int cpu, i, n, chan_idx;
-+ struct dma_chan *chan;
-+
-+ if (cpus_empty(net_dma->channel_mask)) {
-+ for_each_online_cpu(cpu)
-+ rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
-+ return;
-+ }
-+
-+ i = 0;
-+ cpu = first_cpu(cpu_online_map);
-+
-+ for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
-+ chan = net_dma->channels[chan_idx];
-+
-+ n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
-+ + (i < (num_online_cpus() %
-+ cpus_weight(net_dma->channel_mask)) ? 1 : 0));
-+
-+ while (n) {
-+ per_cpu(softnet_data, cpu).net_dma = chan;
-+ cpu = next_cpu(cpu, cpu_online_map);
-+ n--;
-+ }
-+ i++;
-+ }
-+}
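-+
-+/*
-+ * Worked example (illustrative): with 8 online CPUs and 3 channels,
-+ * num_online_cpus() / cpus_weight() is 2 with remainder 2, so the
-+ * first two channels each serve 3 CPUs and the third serves the
-+ * remaining 2 (3 + 3 + 2 = 8).
-+ */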
-+
-+/**
-+ * netdev_dma_event - event callback for the net_dma_client
-+ * @client: should always be net_dma_client
-+ * @chan: DMA channel for the event
-+ * @state: DMA state to be handled
-+ */
-+static enum dma_state_client
-+netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-+ enum dma_state state)
-+{
-+ int i, found = 0, pos = -1;
-+ struct net_dma *net_dma =
-+ container_of(client, struct net_dma, client);
-+ enum dma_state_client ack = DMA_DUP; /* default: take no action */
-+
-+ spin_lock(&net_dma->lock);
-+ switch (state) {
-+ case DMA_RESOURCE_AVAILABLE:
-+ for (i = 0; i < nr_cpu_ids; i++)
-+ if (net_dma->channels[i] == chan) {
-+ found = 1;
-+ break;
-+ } else if (net_dma->channels[i] == NULL && pos < 0)
-+ pos = i;
-+
-+ if (!found && pos >= 0) {
-+ ack = DMA_ACK;
-+ net_dma->channels[pos] = chan;
-+ cpu_set(pos, net_dma->channel_mask);
-+ net_dma_rebalance(net_dma);
-+ }
-+ break;
-+ case DMA_RESOURCE_REMOVED:
-+ for (i = 0; i < nr_cpu_ids; i++)
-+ if (net_dma->channels[i] == chan) {
-+ found = 1;
-+ pos = i;
-+ break;
-+ }
-+
-+ if (found) {
-+ ack = DMA_ACK;
-+ cpu_clear(pos, net_dma->channel_mask);
-+ net_dma->channels[pos] = NULL;
-+ net_dma_rebalance(net_dma);
-+ }
-+ break;
-+ default:
-+ break;
-+ }
-+ spin_unlock(&net_dma->lock);
-+
-+ return ack;
-+}
-+
-+/**
-+ * netdev_dma_register - register the networking subsystem as a DMA client
-+ */
-+static int __init netdev_dma_register(void)
-+{
-+ net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct dma_chan *),
-+ GFP_KERNEL);
-+ if (unlikely(!net_dma.channels)) {
-+ printk(KERN_NOTICE
-+ "netdev_dma: no memory for net_dma.channels\n");
-+ return -ENOMEM;
-+ }
-+ spin_lock_init(&net_dma.lock);
-+ dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
-+ dma_async_client_register(&net_dma.client);
-+ dma_async_client_chan_request(&net_dma.client);
-+ return 0;
-+}
-+
-+#else
-+static int __init netdev_dma_register(void) { return -ENODEV; }
-+#endif /* CONFIG_NET_DMA */
-+
-+/**
-+ * netdev_compute_features - compute conjunction of two feature sets
-+ * @all: current feature set of the master device
-+ * @one: feature set of the device being added
-+ *
-+ * Computes a new feature set after adding a device with feature set
-+ * @one to the master device with current feature set @all. Returns
-+ * the new feature set.
-+ */
-+int netdev_compute_features(unsigned long all, unsigned long one)
-+{
-+ /* if device needs checksumming, downgrade to hw checksumming */
-+ if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
-+ all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
-+
-+ /* if device can't do all checksum, downgrade to ipv4/ipv6 */
-+ if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
-+ all ^= NETIF_F_HW_CSUM
-+ | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-+
-+ if (one & NETIF_F_GSO)
-+ one |= NETIF_F_GSO_SOFTWARE;
-+ one |= NETIF_F_GSO;
-+
-+ /* If even one device supports robust GSO, enable it for all. */
-+ if (one & NETIF_F_GSO_ROBUST)
-+ all |= NETIF_F_GSO_ROBUST;
-+
-+ all &= one | NETIF_F_LLTX;
-+
-+ if (!(all & NETIF_F_ALL_CSUM))
-+ all &= ~NETIF_F_SG;
-+ if (!(all & NETIF_F_SG))
-+ all &= ~NETIF_F_GSO_MASK;
-+
-+ return all;
-+}
-+EXPORT_SYMBOL(netdev_compute_features);
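-+
-+/*
-+ * Worked example (illustrative): enslaving a device with
-+ * one = NETIF_F_IP_CSUM | NETIF_F_SG under a master with
-+ * all = NETIF_F_NO_CSUM | NETIF_F_SG first downgrades
-+ * NETIF_F_NO_CSUM to NETIF_F_HW_CSUM, then NETIF_F_HW_CSUM to
-+ * NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; after masking with
-+ * (one | NETIF_F_LLTX) the result is NETIF_F_IP_CSUM | NETIF_F_SG,
-+ * so scatter/gather survives because a common checksum mode exists.
-+ */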
-+
-+static struct hlist_head *netdev_create_hash(void)
-+{
-+ int i;
-+ struct hlist_head *hash;
-+
-+ hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
-+ if (hash != NULL)
-+ for (i = 0; i < NETDEV_HASHENTRIES; i++)
-+ INIT_HLIST_HEAD(&hash[i]);
-+
-+ return hash;
-+}
-+
-+/* Initialize per network namespace state */
-+static int __net_init netdev_init(struct net *net)
-+{
-+ INIT_LIST_HEAD(&net->dev_base_head);
-+
-+ net->dev_name_head = netdev_create_hash();
-+ if (net->dev_name_head == NULL)
-+ goto err_name;
-+
-+ net->dev_index_head = netdev_create_hash();
-+ if (net->dev_index_head == NULL)
-+ goto err_idx;
-+
-+ return 0;
-+
-+err_idx:
-+ kfree(net->dev_name_head);
-+err_name:
-+ return -ENOMEM;
-+}
-+
-+char *netdev_drivername(struct net_device *dev, char *buffer, int len)
-+{
-+ struct device_driver *driver;
-+ struct device *parent;
-+
-+ if (len <= 0 || !buffer)
-+ return buffer;
-+ buffer[0] = 0;
-+
-+ parent = dev->dev.parent;
-+
-+ if (!parent)
-+ return buffer;
-+
-+ driver = parent->driver;
-+ if (driver && driver->name)
-+ strlcpy(buffer, driver->name, len);
-+ return buffer;
-+}
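-+
-+/*
-+ * Example (illustrative): callers pass a short stack buffer, as the
-+ * transmit-watchdog message in the packet scheduler does:
-+ *
-+ *	char drivername[64];
-+ *	printk(KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
-+ *	       dev->name, netdev_drivername(dev, drivername, 64));
-+ */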
-+
-+static void __net_exit netdev_exit(struct net *net)
-+{
-+ kfree(net->dev_name_head);
-+ kfree(net->dev_index_head);
-+}
-+
-+static struct pernet_operations __net_initdata netdev_net_ops = {
-+ .init = netdev_init,
-+ .exit = netdev_exit,
-+};
-+
-+static void __net_exit default_device_exit(struct net *net)
-+{
-+ struct net_device *dev, *next;
-+ /*
-+ * Push all migratable network devices back to the
-+ * initial network namespace
-+ */
-+ rtnl_lock();
-+ for_each_netdev_safe(net, dev, next) {
-+ int err;
-+ char fb_name[IFNAMSIZ];
-+
-+ /* Ignore unmoveable devices (e.g. loopback) */
-+ if (dev->features & NETIF_F_NETNS_LOCAL)
-+ continue;
-+
-+ /* Push remaining network devices to init_net */
-+ snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
-+ err = dev_change_net_namespace(dev, &init_net, fb_name);
-+ if (err) {
-+ printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
-+ __func__, dev->name, err);
-+ BUG();
-+ }
-+ }
-+ rtnl_unlock();
-+}
-+
-+static struct pernet_operations __net_initdata default_device_ops = {
-+ .exit = default_device_exit,
-+};
-+
-+/*
-+ * Initialize the DEV module. At boot time this walks the device list and
-+ * unhooks any devices that fail to initialise (normally hardware not
-+ * present) and leaves us with a valid list of present and active devices.
-+ *
-+ */
-+
-+/*
-+ * This is called single threaded during boot, so no need
-+ * to take the rtnl semaphore.
-+ */
-+static int __init net_dev_init(void)
-+{
-+ int i, rc = -ENOMEM;
-+
-+ BUG_ON(!dev_boot_phase);
-+
-+ if (dev_proc_init())
-+ goto out;
-+
-+ if (netdev_kobject_init())
-+ goto out;
-+
-+ INIT_LIST_HEAD(&ptype_all);
-+ for (i = 0; i < PTYPE_HASH_SIZE; i++)
-+ INIT_LIST_HEAD(&ptype_base[i]);
-+
-+ if (register_pernet_subsys(&netdev_net_ops))
-+ goto out;
-+
-+ if (register_pernet_device(&default_device_ops))
-+ goto out;
-+
-+ /*
-+ * Initialise the packet receive queues.
-+ */
-+
-+ for_each_possible_cpu(i) {
-+ struct softnet_data *queue;
-+
-+ queue = &per_cpu(softnet_data, i);
-+ skb_queue_head_init(&queue->input_pkt_queue);
-+ queue->completion_queue = NULL;
-+ INIT_LIST_HEAD(&queue->poll_list);
-+
-+ queue->backlog.poll = process_backlog;
-+ queue->backlog.weight = weight_p;
-+ }
-+
-+ netdev_dma_register();
-+
-+ dev_boot_phase = 0;
-+
-+ open_softirq(NET_TX_SOFTIRQ, net_tx_action);
-+ open_softirq(NET_RX_SOFTIRQ, net_rx_action);
-+
-+ hotcpu_notifier(dev_cpu_callback, 0);
-+ dst_init();
-+ dev_mcast_init();
-+ rc = 0;
-+out:
-+ return rc;
-+}
-+
-+subsys_initcall(net_dev_init);
-+
-+EXPORT_SYMBOL(__dev_get_by_index);
-+EXPORT_SYMBOL(__dev_get_by_name);
-+EXPORT_SYMBOL(__dev_remove_pack);
-+EXPORT_SYMBOL(dev_valid_name);
-+EXPORT_SYMBOL(dev_add_pack);
-+EXPORT_SYMBOL(dev_alloc_name);
-+EXPORT_SYMBOL(dev_close);
-+EXPORT_SYMBOL(dev_get_by_flags);
-+EXPORT_SYMBOL(dev_get_by_index);
-+EXPORT_SYMBOL(dev_get_by_name);
-+EXPORT_SYMBOL(dev_open);
-+EXPORT_SYMBOL(dev_ioctl);
-+EXPORT_SYMBOL(dev_queue_xmit);
-+EXPORT_SYMBOL(dev_remove_pack);
-+EXPORT_SYMBOL(dev_set_allmulti);
-+EXPORT_SYMBOL(dev_set_promiscuity);
-+EXPORT_SYMBOL(dev_change_flags);
-+EXPORT_SYMBOL(dev_set_mtu);
-+EXPORT_SYMBOL(dev_set_mac_address);
-+EXPORT_SYMBOL(free_netdev);
-+EXPORT_SYMBOL(netdev_boot_setup_check);
-+EXPORT_SYMBOL(netdev_set_master);
-+EXPORT_SYMBOL(netdev_state_change);
-+EXPORT_SYMBOL(netif_receive_skb);
-+EXPORT_SYMBOL(__netif_receive_skb);
-+EXPORT_SYMBOL(netif_rx);
-+EXPORT_SYMBOL(register_gifconf);
-+EXPORT_SYMBOL(register_netdevice);
-+EXPORT_SYMBOL(register_netdevice_notifier);
-+
-+/* Click */
-+EXPORT_SYMBOL(register_net_in);
-+EXPORT_SYMBOL(unregister_net_in);
-+
-+EXPORT_SYMBOL(skb_checksum_help);
-+EXPORT_SYMBOL(synchronize_net);
-+EXPORT_SYMBOL(unregister_netdevice);
-+EXPORT_SYMBOL(unregister_netdevice_notifier);
-+EXPORT_SYMBOL(net_enable_timestamp);
-+EXPORT_SYMBOL(net_disable_timestamp);
-+EXPORT_SYMBOL(dev_get_flags);
-+
-+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
-+EXPORT_SYMBOL(br_handle_frame_hook);
-+EXPORT_SYMBOL(br_fdb_get_hook);
-+EXPORT_SYMBOL(br_fdb_put_hook);
-+#endif
-+
-+#ifdef CONFIG_KMOD
-+EXPORT_SYMBOL(dev_load);
-+#endif
-+
-+EXPORT_PER_CPU_SYMBOL(softnet_data);