merge turds: remove stray editor backup files (*~) and lxdialog.scrltmp left behind by a merge
author     Marc Fiuczynski <mef@cs.princeton.edu>
           Thu, 19 Oct 2006 03:19:50 +0000 (03:19 +0000)
committer  Marc Fiuczynski <mef@cs.princeton.edu>
           Thu, 19 Oct 2006 03:19:50 +0000 (03:19 +0000)
lxdialog.scrltmp [deleted file]
mm/fremap.c~ [deleted file]
net/core/dev.c~ [deleted file]
net/ipv4/tcp.c~ [deleted file]

diff --git a/lxdialog.scrltmp b/lxdialog.scrltmp
deleted file mode 100644 (file)
index b4de394..0000000
+++ /dev/null
@@ -1 +0,0 @@
-11
diff --git a/mm/fremap.c~ b/mm/fremap.c~
deleted file mode 100644 (file)
index 9ba4fc6..0000000
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- *   linux/mm/fremap.c
- * 
- * Explicit pagetable population and nonlinear (random) mappings support.
- *
- * started by Ingo Molnar, Copyright (C) 2002, 2003
- */
-
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/file.h>
-#include <linux/mman.h>
-#include <linux/pagemap.h>
-#include <linux/swapops.h>
-#include <linux/rmap.h>
-#include <linux/module.h>
-#include <linux/syscalls.h>
-#include <linux/vs_memory.h>
-
-#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-
-static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
-                       unsigned long addr, pte_t *ptep)
-{
-       pte_t pte = *ptep;
-       struct page *page = NULL;
-
-       if (pte_present(pte)) {
-               flush_cache_page(vma, addr, pte_pfn(pte));
-               pte = ptep_clear_flush(vma, addr, ptep);
-               page = vm_normal_page(vma, addr, pte);
-               if (page) {
-                       if (pte_dirty(pte))
-                               set_page_dirty(page);
-                       page_remove_rmap(page);
-                       page_cache_release(page);
-               }
-       } else {
-               if (!pte_file(pte))
-                       free_swap_and_cache(pte_to_swp_entry(pte));
-               pte_clear(mm, addr, ptep);
-       }
-       return !!page;
-}
-
-/*
- * Install a file page at a given virtual memory address, releasing any
- * previously existing mapping.
- */
-int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
-               unsigned long addr, struct page *page, pgprot_t prot)
-{
-       struct inode *inode;
-       pgoff_t size;
-       int err = -ENOMEM;
-       pte_t *pte;
-       pte_t pte_val;
-       spinlock_t *ptl;
-
-       pte = get_locked_pte(mm, addr, &ptl);
-       if (!pte)
-               goto out;
-
-       /*
-        * This page may have been truncated. Tell the
-        * caller about it.
-        */
-       err = -EINVAL;
-       if (vma->vm_file) {
-               inode = vma->vm_file->f_mapping->host;
-               size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-               if (!page->mapping || page->index >= size)
-                       goto unlock;
-               err = -ENOMEM;
-               if (page_mapcount(page) > INT_MAX/2)
-                       goto unlock;
-       }
-
-       if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
-               inc_mm_counter(mm, file_rss);
-
-       flush_icache_page(vma, page);
-       set_pte_at(mm, addr, pte, mk_pte(page, prot));
-       page_add_file_rmap(page);
-       pte_val = *pte;
-       update_mmu_cache(vma, addr, pte_val);
-       err = 0;
-unlock:
-       pte_unmap_unlock(pte, ptl);
-out:
-       return err;
-}
-EXPORT_SYMBOL(install_page);
-
-/*
- * Install a file pte at a given virtual memory address, releasing any
- * previously existing mapping.
- */
-int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
-               unsigned long addr, unsigned long pgoff, pgprot_t prot)
-{
-       int err = -ENOMEM;
-       pte_t *pte;
-       pte_t pte_val;
-       spinlock_t *ptl;
-
-       pte = get_locked_pte(mm, addr, &ptl);
-       if (!pte)
-               goto out;
-
-       if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
-               update_hiwater_rss(mm);
-               dec_mm_counter(mm, file_rss);
-       }
-
-       set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
-       pte_val = *pte;
-       update_mmu_cache(vma, addr, pte_val);
-       pte_unmap_unlock(pte, ptl);
-       err = 0;
-out:
-       return err;
-}
-
-/***
- * sys_remap_file_pages - remap arbitrary pages of a shared backing store
- *                        file within an existing vma.
- * @start: start of the remapped virtual memory range
- * @size: size of the remapped virtual memory range
- * @prot: new protection bits of the range
- * @pgoff: to be mapped page of the backing store file
- * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
- *
- * this syscall works purely via pagetables, so it's the most efficient
- * way to map the same (large) file into a given virtual window. Unlike
- * mmap()/mremap() it does not create any new vmas. The new mappings are
- * also safe across swapout.
- *
- * NOTE: the 'prot' parameter right now is ignored, and the vma's default
- * protection is used. Arbitrary protections might be implemented in the
- * future.
- */
-asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
-       unsigned long __prot, unsigned long pgoff, unsigned long flags)
-{
-       struct mm_struct *mm = current->mm;
-       struct address_space *mapping;
-       unsigned long end = start + size;
-       struct vm_area_struct *vma;
-       int err = -EINVAL;
-       int has_write_lock = 0;
-
-       if (__prot)
-               return err;
-       /*
-        * Sanitize the syscall parameters:
-        */
-       start = start & PAGE_MASK;
-       size = size & PAGE_MASK;
-
-       /* Does the address range wrap, or is the span zero-sized? */
-       if (start + size <= start)
-               return err;
-
-       /* Can we represent this offset inside this architecture's pte's? */
-#if PTE_FILE_MAX_BITS < BITS_PER_LONG
-       if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
-               return err;
-#endif
-
-       /* We need down_write() to change vma->vm_flags. */
-       down_read(&mm->mmap_sem);
- retry:
-       vma = find_vma(mm, start);
-
-       /*
-        * Make sure the vma is shared, that it supports prefaulting,
-        * and that the remapped range is valid and fully within
-        * the single existing vma.  vm_private_data is used as a
-        * swapout cursor in a VM_NONLINEAR vma.
-        */
-       if (vma && (vma->vm_flags & VM_SHARED) &&
-               (!vma->vm_private_data || (vma->vm_flags & VM_NONLINEAR)) &&
-               vma->vm_ops && vma->vm_ops->populate &&
-                       end > start && start >= vma->vm_start &&
-                               end <= vma->vm_end) {
-
-               /* Must set VM_NONLINEAR before any pages are populated. */
-               if (pgoff != linear_page_index(vma, start) &&
-                   !(vma->vm_flags & VM_NONLINEAR)) {
-                       if (!has_write_lock) {
-                               up_read(&mm->mmap_sem);
-                               down_write(&mm->mmap_sem);
-                               has_write_lock = 1;
-                               goto retry;
-                       }
-                       mapping = vma->vm_file->f_mapping;
-                       spin_lock(&mapping->i_mmap_lock);
-                       flush_dcache_mmap_lock(mapping);
-                       vma->vm_flags |= VM_NONLINEAR;
-                       vma_prio_tree_remove(vma, &mapping->i_mmap);
-                       vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
-                       flush_dcache_mmap_unlock(mapping);
-                       spin_unlock(&mapping->i_mmap_lock);
-               }
-
-               err = vma->vm_ops->populate(vma, start, size,
-                                           vma->vm_page_prot,
-                                           pgoff, flags & MAP_NONBLOCK);
-
-               /*
-                * We can't clear VM_NONLINEAR because we'd have to do
-                * it after ->populate completes, and that would prevent
-                * downgrading the lock.  (Locks can't be upgraded).
-                */
-       }
-       if (likely(!has_write_lock))
-               up_read(&mm->mmap_sem);
-       else
-               up_write(&mm->mmap_sem);
-
-       return err;
-}
-
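
The kernel-doc block in the deleted mm/fremap.c~ above describes sys_remap_file_pages(), which rewires page-table entries inside a single existing VM_SHARED vma rather than creating new vmas. For context, a minimal userspace sketch of driving that syscall through the glibc wrapper (the file name and sizes are invented for illustration and are not part of this commit):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = open("data.bin", O_RDWR);	/* hypothetical file, >= 2 pages */
	if (fd < 0) { perror("open"); return 1; }

	/* Map two pages of the file linearly; MAP_SHARED is required,
	 * since the syscall only accepts VM_SHARED vmas (see the check above). */
	char *buf = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

	/* Point the first virtual page at file page 1, making the vma
	 * nonlinear; prot is passed as 0 because sys_remap_file_pages()
	 * above returns -EINVAL for any nonzero protection value. */
	if (remap_file_pages(buf, page, 0, 1, 0) != 0)
		perror("remap_file_pages");

	munmap(buf, 2 * page);
	close(fd);
	return 0;
}

As the @pgoff description above notes, the offset is given in units of the system page size, not bytes.
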
diff --git a/net/core/dev.c~ b/net/core/dev.c~
deleted file mode 100644 (file)
index 9fadb9f..0000000
+++ /dev/null
@@ -1,3559 +0,0 @@
-/*
- *     NET3    Protocol independent device support routines.
- *
- *             This program is free software; you can redistribute it and/or
- *             modify it under the terms of the GNU General Public License
- *             as published by the Free Software Foundation; either version
- *             2 of the License, or (at your option) any later version.
- *
- *     Derived from the non IP parts of dev.c 1.0.19
- *             Authors:        Ross Biro
- *                             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *                             Mark Evans, <evansmp@uhura.aston.ac.uk>
- *
- *     Additional Authors:
- *             Florian la Roche <rzsfl@rz.uni-sb.de>
- *             Alan Cox <gw4pts@gw4pts.ampr.org>
- *             David Hinds <dahinds@users.sourceforge.net>
- *             Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
- *             Adam Sulmicki <adam@cfar.umd.edu>
- *              Pekka Riikonen <priikone@poesidon.pspt.fi>
- *
- *     Changes:
- *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
- *                                     to 2 if register_netdev gets called
- *                                     before net_dev_init & also removed a
- *                                     few lines of code in the process.
- *             Alan Cox        :       device private ioctl copies fields back.
- *             Alan Cox        :       Transmit queue code does relevant
- *                                     stunts to keep the queue safe.
- *             Alan Cox        :       Fixed double lock.
- *             Alan Cox        :       Fixed promisc NULL pointer trap
- *             ????????        :       Support the full private ioctl range
- *             Alan Cox        :       Moved ioctl permission check into
- *                                     drivers
- *             Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
- *             Alan Cox        :       100 backlog just doesn't cut it when
- *                                     you start doing multicast video 8)
- *             Alan Cox        :       Rewrote net_bh and list manager.
- *             Alan Cox        :       Fix ETH_P_ALL echoback lengths.
- *             Alan Cox        :       Took out transmit every packet pass
- *                                     Saved a few bytes in the ioctl handler
- *             Alan Cox        :       Network driver sets packet type before
- *                                     calling netif_rx. Saves a function
- *                                     call a packet.
- *             Alan Cox        :       Hashed net_bh()
- *             Richard Kooijman:       Timestamp fixes.
- *             Alan Cox        :       Wrong field in SIOCGIFDSTADDR
- *             Alan Cox        :       Device lock protection.
- *             Alan Cox        :       Fixed nasty side effect of device close
- *                                     changes.
- *             Rudi Cilibrasi  :       Pass the right thing to
- *                                     set_mac_address()
- *             Dave Miller     :       32bit quantity for the device lock to
- *                                     make it work out on a Sparc.
- *             Bjorn Ekwall    :       Added KERNELD hack.
- *             Alan Cox        :       Cleaned up the backlog initialise.
- *             Craig Metz      :       SIOCGIFCONF fix if space for under
- *                                     1 device.
- *         Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
- *                                     is no device open function.
- *             Andi Kleen      :       Fix error reporting for SIOCGIFCONF
- *         Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
- *             Cyrus Durgin    :       Cleaned for KMOD
- *             Adam Sulmicki   :       Bug Fix : Network Device Unload
- *                                     A network device unload needs to purge
- *                                     the backlog queue.
- *     Paul Rusty Russell      :       SIOCSIFNAME
- *              Pekka Riikonen  :      Netdev boot-time settings code
- *              Andrew Morton   :       Make unregister_netdevice wait
- *                                     indefinitely on dev->refcnt
- *             J Hadi Salim    :       - Backlog queue sampling
- *                                     - netif_rx() feedback
- */
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
-#include <linux/capability.h>
-#include <linux/config.h>
-#include <linux/cpu.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mutex.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <linux/skbuff.h>
-#include <net/sock.h>
-#include <linux/rtnetlink.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/stat.h>
-#include <linux/if_bridge.h>
-#include <linux/divert.h>
-#include <net/dst.h>
-#include <net/pkt_sched.h>
-#include <net/checksum.h>
-#include <linux/highmem.h>
-#include <linux/init.h>
-#include <linux/kmod.h>
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/netpoll.h>
-#include <linux/rcupdate.h>
-#include <linux/delay.h>
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-#include <asm/current.h>
-#include <linux/audit.h>
-#include <linux/err.h>
-
-#ifdef CONFIG_XEN
-#include <net/ip.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
-#endif
-
-/*
- *     The list of packet types we will receive (as opposed to discard)
- *     and the routines to invoke.
- *
- *     Why 16. Because with 16 the only overlap we get on a hash of the
- *     low nibble of the protocol value is RARP/SNAP/X.25.
- *
- *      NOTE:  That is no longer true with the addition of VLAN tags.  Not
- *             sure which should go first, but I bet it won't make much
- *             difference if we are running VLANs.  The good news is that
- *             this protocol won't be in the list unless compiled in, so
- *             the average user (w/out VLANs) will not be adversely affected.
- *             --BLG
- *
- *             0800    IP
- *             8100    802.1Q VLAN
- *             0001    802.3
- *             0002    AX.25
- *             0004    802.2
- *             8035    RARP
- *             0005    SNAP
- *             0805    X.25
- *             0806    ARP
- *             8137    IPX
- *             0009    Localtalk
- *             86DD    IPv6
- */
-
-static DEFINE_SPINLOCK(ptype_lock);
-static struct list_head ptype_base[16];        /* 16 way hashed list */
-static struct list_head ptype_all;             /* Taps */
-
-/*
- * The @dev_base list is protected by @dev_base_lock and the rtnl
- * semaphore.
- *
- * Pure readers hold dev_base_lock for reading.
- *
- * Writers must hold the rtnl semaphore while they loop through the
- * dev_base list, and hold dev_base_lock for writing when they do the
- * actual updates.  This allows pure readers to access the list even
- * while a writer is preparing to update it.
- *
- * To put it another way, dev_base_lock is held for writing only to
- * protect against pure readers; the rtnl semaphore provides the
- * protection against other writers.
- *
- * See, for example usages, register_netdevice() and
- * unregister_netdevice(), which must be called with the rtnl
- * semaphore held.
- */
-struct net_device *dev_base;
-static struct net_device **dev_tail = &dev_base;
-DEFINE_RWLOCK(dev_base_lock);
-
-EXPORT_SYMBOL(dev_base);
-EXPORT_SYMBOL(dev_base_lock);
-
-#define NETDEV_HASHBITS        8
-static struct hlist_head dev_name_head[1<<NETDEV_HASHBITS];
-static struct hlist_head dev_index_head[1<<NETDEV_HASHBITS];
-
-static inline struct hlist_head *dev_name_hash(const char *name)
-{
-       unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
-       return &dev_name_head[hash & ((1<<NETDEV_HASHBITS)-1)];
-}
-
-static inline struct hlist_head *dev_index_hash(int ifindex)
-{
-       return &dev_index_head[ifindex & ((1<<NETDEV_HASHBITS)-1)];
-}
-
-/*
- *     Our notifier list
- */
-
-static RAW_NOTIFIER_HEAD(netdev_chain);
-
-/*
- *     Device drivers call our routines to queue packets here. We empty the
- *     queue in the local softnet handler.
- */
-DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL };
-
-#ifdef CONFIG_SYSFS
-extern int netdev_sysfs_init(void);
-extern int netdev_register_sysfs(struct net_device *);
-extern void netdev_unregister_sysfs(struct net_device *);
-#else
-#define netdev_sysfs_init()            (0)
-#define netdev_register_sysfs(dev)     (0)
-#define        netdev_unregister_sysfs(dev)    do { } while(0)
-#endif
-
-
-/*******************************************************************************
-
-               Protocol management and registration routines
-
-*******************************************************************************/
-
-/*
- *     For efficiency
- */
-
-int netdev_nit;
-
-/*
- *     Add a protocol ID to the list. Now that the input handler is
- *     smarter we can dispense with all the messy stuff that used to be
- *     here.
- *
- *     BEWARE!!! Protocol handlers, mangling input packets,
- *     MUST BE last in hash buckets and checking protocol handlers
- *     MUST start from promiscuous ptype_all chain in net_bh.
- *     It is true now, do not change it.
- *     Explanation follows: if a protocol handler that mangles packets
- *     is first on the list, it cannot sense that the packet
- *     is cloned and should be copied-on-write, so it will
- *     change it and subsequent readers will get a broken packet.
- *                                                     --ANK (980803)
- */
-
-/**
- *     dev_add_pack - add packet handler
- *     @pt: packet type declaration
- *
- *     Add a protocol handler to the networking stack. The passed &packet_type
- *     is linked into kernel lists and may not be freed until it has been
- *     removed from the kernel lists.
- *
- *     This call does not sleep, therefore it cannot
- *     guarantee that all CPUs that are in the middle of receiving packets
- *     will see the new packet type (until the next received packet).
- */
-
-void dev_add_pack(struct packet_type *pt)
-{
-       int hash;
-
-       spin_lock_bh(&ptype_lock);
-       if (pt->type == htons(ETH_P_ALL)) {
-               netdev_nit++;
-               list_add_rcu(&pt->list, &ptype_all);
-       } else {
-               hash = ntohs(pt->type) & 15;
-               list_add_rcu(&pt->list, &ptype_base[hash]);
-       }
-       spin_unlock_bh(&ptype_lock);
-}
-
-/**
- *     __dev_remove_pack        - remove packet handler
- *     @pt: packet type declaration
- *
- *     Remove a protocol handler that was previously added to the kernel
- *     protocol handlers by dev_add_pack(). The passed &packet_type is removed
- *     from the kernel lists and can be freed or reused once this function
- *     returns. 
- *
- *      The packet type might still be in use by receivers
- *     and must not be freed until after all the CPUs have gone
- *     through a quiescent state.
- */
-void __dev_remove_pack(struct packet_type *pt)
-{
-       struct list_head *head;
-       struct packet_type *pt1;
-
-       spin_lock_bh(&ptype_lock);
-
-       if (pt->type == htons(ETH_P_ALL)) {
-               netdev_nit--;
-               head = &ptype_all;
-       } else
-               head = &ptype_base[ntohs(pt->type) & 15];
-
-       list_for_each_entry(pt1, head, list) {
-               if (pt == pt1) {
-                       list_del_rcu(&pt->list);
-                       goto out;
-               }
-       }
-
-       printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
-out:
-       spin_unlock_bh(&ptype_lock);
-}
-/**
- *     dev_remove_pack  - remove packet handler
- *     @pt: packet type declaration
- *
- *     Remove a protocol handler that was previously added to the kernel
- *     protocol handlers by dev_add_pack(). The passed &packet_type is removed
- *     from the kernel lists and can be freed or reused once this function
- *     returns.
- *
- *     This call sleeps to guarantee that no CPU is looking at the packet
- *     type after return.
- */
-void dev_remove_pack(struct packet_type *pt)
-{
-       __dev_remove_pack(pt);
-       
-       synchronize_net();
-}
-
-/******************************************************************************
-
-                     Device Boot-time Settings Routines
-
-*******************************************************************************/
-
-/* Boot time configuration table */
-static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
-
-/**
- *     netdev_boot_setup_add   - add new setup entry
- *     @name: name of the device
- *     @map: configured settings for the device
- *
- *     Adds a new setup entry to the dev_boot_setup list.  The function
- *     returns 0 on error and 1 on success.  This is a generic routine
- *     for all netdevices.
- */
-static int netdev_boot_setup_add(char *name, struct ifmap *map)
-{
-       struct netdev_boot_setup *s;
-       int i;
-
-       s = dev_boot_setup;
-       for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
-               if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
-                       memset(s[i].name, 0, sizeof(s[i].name));
-                       strcpy(s[i].name, name);
-                       memcpy(&s[i].map, map, sizeof(s[i].map));
-                       break;
-               }
-       }
-
-       return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
-}
-
-/**
- *     netdev_boot_setup_check - check boot time settings
- *     @dev: the netdevice
- *
- *     Check boot time settings for the device.
- *     The found settings are set for the device to be used
- *     later in the device probing.
- *     Returns 0 if no settings are found, 1 if they are.
- */
-int netdev_boot_setup_check(struct net_device *dev)
-{
-       struct netdev_boot_setup *s = dev_boot_setup;
-       int i;
-
-       for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
-               if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
-                   !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
-                       dev->irq        = s[i].map.irq;
-                       dev->base_addr  = s[i].map.base_addr;
-                       dev->mem_start  = s[i].map.mem_start;
-                       dev->mem_end    = s[i].map.mem_end;
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-
-/**
- *     netdev_boot_base        - get address from boot time settings
- *     @prefix: prefix for network device
- *     @unit: id for network device
- *
- *     Check boot time settings for the base address of device.
- *     The found settings are set for the device to be used
- *     later in the device probing.
- *     Returns 0 if no settings found.
- */
-unsigned long netdev_boot_base(const char *prefix, int unit)
-{
-       const struct netdev_boot_setup *s = dev_boot_setup;
-       char name[IFNAMSIZ];
-       int i;
-
-       sprintf(name, "%s%d", prefix, unit);
-
-       /*
-        * If the device is already registered, return a base of 1
-        * to indicate not to probe for this interface
-        */
-       if (__dev_get_by_name(name))
-               return 1;
-
-       for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
-               if (!strcmp(name, s[i].name))
-                       return s[i].map.base_addr;
-       return 0;
-}
-
-/*
- * Saves at boot time configured settings for any netdevice.
- */
-int __init netdev_boot_setup(char *str)
-{
-       int ints[5];
-       struct ifmap map;
-
-       str = get_options(str, ARRAY_SIZE(ints), ints);
-       if (!str || !*str)
-               return 0;
-
-       /* Save settings */
-       memset(&map, 0, sizeof(map));
-       if (ints[0] > 0)
-               map.irq = ints[1];
-       if (ints[0] > 1)
-               map.base_addr = ints[2];
-       if (ints[0] > 2)
-               map.mem_start = ints[3];
-       if (ints[0] > 3)
-               map.mem_end = ints[4];
-
-       /* Add new entry to the list */
-       return netdev_boot_setup_add(str, &map);
-}
-
-__setup("netdev=", netdev_boot_setup);
-
-/*******************************************************************************
-
-                           Device Interface Subroutines
-
-*******************************************************************************/
-
-/**
- *     __dev_get_by_name       - find a device by its name
- *     @name: name to find
- *
- *     Find an interface by name. Must be called under RTNL semaphore
- *     or @dev_base_lock. If the name is found a pointer to the device
- *     is returned. If the name is not found then %NULL is returned. The
- *     reference counters are not incremented so the caller must be
- *     careful with locks.
- */
-
-struct net_device *__dev_get_by_name(const char *name)
-{
-       struct hlist_node *p;
-
-       hlist_for_each(p, dev_name_hash(name)) {
-               struct net_device *dev
-                       = hlist_entry(p, struct net_device, name_hlist);
-               if (!strncmp(dev->name, name, IFNAMSIZ))
-                       return dev;
-       }
-       return NULL;
-}
-
-/**
- *     dev_get_by_name         - find a device by its name
- *     @name: name to find
- *
- *     Find an interface by name. This can be called from any
- *     context and does its own locking. The returned handle has
- *     the usage count incremented and the caller must use dev_put() to
- *     release it when it is no longer needed. %NULL is returned if no
- *     matching device is found.
- */
-
-struct net_device *dev_get_by_name(const char *name)
-{
-       struct net_device *dev;
-
-       read_lock(&dev_base_lock);
-       dev = __dev_get_by_name(name);
-       if (dev)
-               dev_hold(dev);
-       read_unlock(&dev_base_lock);
-       return dev;
-}
-
-/**
- *     __dev_get_by_index - find a device by its ifindex
- *     @ifindex: index of device
- *
- *     Search for an interface by index. Returns %NULL if the device
- *     is not found or a pointer to the device. The device has not
- *     had its reference counter increased so the caller must be careful
- *     about locking. The caller must hold either the RTNL semaphore
- *     or @dev_base_lock.
- */
-
-struct net_device *__dev_get_by_index(int ifindex)
-{
-       struct hlist_node *p;
-
-       hlist_for_each(p, dev_index_hash(ifindex)) {
-               struct net_device *dev
-                       = hlist_entry(p, struct net_device, index_hlist);
-               if (dev->ifindex == ifindex)
-                       return dev;
-       }
-       return NULL;
-}
-
-
-/**
- *     dev_get_by_index - find a device by its ifindex
- *     @ifindex: index of device
- *
- *     Search for an interface by index. Returns NULL if the device
- *     is not found or a pointer to the device. The device returned has
- *     had a reference added and the pointer is safe until the user calls
- *     dev_put to indicate they have finished with it.
- */
-
-struct net_device *dev_get_by_index(int ifindex)
-{
-       struct net_device *dev;
-
-       read_lock(&dev_base_lock);
-       dev = __dev_get_by_index(ifindex);
-       if (dev)
-               dev_hold(dev);
-       read_unlock(&dev_base_lock);
-       return dev;
-}
-
-/**
- *     dev_getbyhwaddr - find a device by its hardware address
- *     @type: media type of device
- *     @ha: hardware address
- *
- *     Search for an interface by MAC address. Returns NULL if the device
- *     is not found or a pointer to the device. The caller must hold the
- *     rtnl semaphore. The returned device has not had its ref count increased
- *     and the caller must therefore be careful about locking
- *
- *     BUGS:
- *     If the API was consistent this would be __dev_get_by_hwaddr
- */
-
-struct net_device *dev_getbyhwaddr(unsigned short type, char *ha)
-{
-       struct net_device *dev;
-
-       ASSERT_RTNL();
-
-       for (dev = dev_base; dev; dev = dev->next)
-               if (dev->type == type &&
-                   !memcmp(dev->dev_addr, ha, dev->addr_len))
-                       break;
-       return dev;
-}
-
-EXPORT_SYMBOL(dev_getbyhwaddr);
-
-struct net_device *dev_getfirstbyhwtype(unsigned short type)
-{
-       struct net_device *dev;
-
-       rtnl_lock();
-       for (dev = dev_base; dev; dev = dev->next) {
-               if (dev->type == type) {
-                       dev_hold(dev);
-                       break;
-               }
-       }
-       rtnl_unlock();
-       return dev;
-}
-
-EXPORT_SYMBOL(dev_getfirstbyhwtype);
-
-/**
- *     dev_get_by_flags - find any device with given flags
- *     @if_flags: IFF_* values
- *     @mask: bitmask of bits in if_flags to check
- *
- *     Search for any interface with the given flags. Returns NULL if a device
- *     is not found or a pointer to the device. The device returned has 
- *     had a reference added and the pointer is safe until the user calls
- *     dev_put to indicate they have finished with it.
- */
-
-struct net_device * dev_get_by_flags(unsigned short if_flags, unsigned short mask)
-{
-       struct net_device *dev;
-
-       read_lock(&dev_base_lock);
-       for (dev = dev_base; dev != NULL; dev = dev->next) {
-               if (((dev->flags ^ if_flags) & mask) == 0) {
-                       dev_hold(dev);
-                       break;
-               }
-       }
-       read_unlock(&dev_base_lock);
-       return dev;
-}
-
-/**
- *     dev_valid_name - check if name is okay for network device
- *     @name: name string
- *
- *     Network device names need to be valid file names to
- *     allow sysfs to work.
- */
-int dev_valid_name(const char *name)
-{
-       return !(*name == '\0' 
-                || !strcmp(name, ".")
-                || !strcmp(name, "..")
-                || strchr(name, '/'));
-}
-
-/**
- *     dev_alloc_name - allocate a name for a device
- *     @dev: device
- *     @name: name format string
- *
- *     Passed a format string - eg "lt%d" - it will try to find a suitable
- *     id. It scans the list of devices to build up a free map, then chooses
- *     the first empty slot. The caller must hold the dev_base or rtnl lock
- *     while allocating the name and adding the device in order to avoid
- *     duplicates.
- *     Limited to bits_per_byte * page size devices (ie 32K on most platforms).
- *     Returns the number of the unit assigned or a negative errno code.
- */
-
-int dev_alloc_name(struct net_device *dev, const char *name)
-{
-       int i = 0;
-       char buf[IFNAMSIZ];
-       const char *p;
-       const int max_netdevices = 8*PAGE_SIZE;
-       long *inuse;
-       struct net_device *d;
-
-       p = strnchr(name, IFNAMSIZ-1, '%');
-       if (p) {
-               /*
-                * Verify the string as this thing may have come from
-                * the user.  There must be exactly one "%d" and no other "%"
-                * characters.
-                */
-               if (p[1] != 'd' || strchr(p + 2, '%'))
-                       return -EINVAL;
-
-               /* Use one page as a bit array of possible slots */
-               inuse = (long *) get_zeroed_page(GFP_ATOMIC);
-               if (!inuse)
-                       return -ENOMEM;
-
-               for (d = dev_base; d; d = d->next) {
-                       if (!sscanf(d->name, name, &i))
-                               continue;
-                       if (i < 0 || i >= max_netdevices)
-                               continue;
-
-                       /*  avoid cases where sscanf is not exact inverse of printf */
-                       snprintf(buf, sizeof(buf), name, i);
-                       if (!strncmp(buf, d->name, IFNAMSIZ))
-                               set_bit(i, inuse);
-               }
-
-               i = find_first_zero_bit(inuse, max_netdevices);
-               free_page((unsigned long) inuse);
-       }
-
-       snprintf(buf, sizeof(buf), name, i);
-       if (!__dev_get_by_name(buf)) {
-               strlcpy(dev->name, buf, IFNAMSIZ);
-               return i;
-       }
-
-       /* It is possible to run out of possible slots
-        * when the name is long and there isn't enough space left
-        * for the digits, or if all bits are used.
-        */
-       return -ENFILE;
-}
-
-
-/**
- *     dev_change_name - change name of a device
- *     @dev: device
- *     @newname: name (or format string) must be at least IFNAMSIZ
- *
- *     Change name of a device; can pass format strings "eth%d"
- *     for wildcarding.
- */
-int dev_change_name(struct net_device *dev, char *newname)
-{
-       int err = 0;
-
-       ASSERT_RTNL();
-
-       if (dev->flags & IFF_UP)
-               return -EBUSY;
-
-       if (!dev_valid_name(newname))
-               return -EINVAL;
-
-       if (strchr(newname, '%')) {
-               err = dev_alloc_name(dev, newname);
-               if (err < 0)
-                       return err;
-               strcpy(newname, dev->name);
-       }
-       else if (__dev_get_by_name(newname))
-               return -EEXIST;
-       else
-               strlcpy(dev->name, newname, IFNAMSIZ);
-
-       err = class_device_rename(&dev->class_dev, dev->name);
-       if (!err) {
-               hlist_del(&dev->name_hlist);
-               hlist_add_head(&dev->name_hlist, dev_name_hash(dev->name));
-               raw_notifier_call_chain(&netdev_chain,
-                               NETDEV_CHANGENAME, dev);
-       }
-
-       return err;
-}
-
-/**
- *     netdev_features_change - device changes features
- *     @dev: device to cause notification
- *
- *     Called to indicate a device has changed features.
- */
-void netdev_features_change(struct net_device *dev)
-{
-       raw_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
-}
-EXPORT_SYMBOL(netdev_features_change);
-
-/**
- *     netdev_state_change - device changes state
- *     @dev: device to cause notification
- *
- *     Called to indicate a device has changed state. This function calls
- *     the notifier chains for netdev_chain and sends a NEWLINK message
- *     to the routing socket.
- */
-void netdev_state_change(struct net_device *dev)
-{
-       if (dev->flags & IFF_UP) {
-               raw_notifier_call_chain(&netdev_chain,
-                               NETDEV_CHANGE, dev);
-               rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
-       }
-}
-
-/**
- *     dev_load        - load a network module
- *     @name: name of interface
- *
- *     If a network interface is not present and the process has suitable
- *     privileges this function loads the module. If module loading is not
- *     available in this kernel then it becomes a nop.
- */
-
-void dev_load(const char *name)
-{
-       struct net_device *dev;  
-
-       read_lock(&dev_base_lock);
-       dev = __dev_get_by_name(name);
-       read_unlock(&dev_base_lock);
-
-       if (!dev && capable(CAP_SYS_MODULE))
-               request_module("%s", name);
-}
-
-static int default_rebuild_header(struct sk_buff *skb)
-{
-       printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
-              skb->dev ? skb->dev->name : "NULL!!!");
-       kfree_skb(skb);
-       return 1;
-}
-
-
-/**
- *     dev_open        - prepare an interface for use.
- *     @dev:   device to open
- *
- *     Takes a device from down to up state. The device's private open
- *     function is invoked and then the multicast lists are loaded. Finally
- *     the device is moved into the up state and a %NETDEV_UP message is
- *     sent to the netdev notifier chain.
- *
- *     Calling this function on an active interface is a nop. On a failure
- *     a negative errno code is returned.
- */
-int dev_open(struct net_device *dev)
-{
-       int ret = 0;
-
-       /*
-        *      Is it already up?
-        */
-
-       if (dev->flags & IFF_UP)
-               return 0;
-
-       /*
-        *      Is it even present?
-        */
-       if (!netif_device_present(dev))
-               return -ENODEV;
-
-       /*
-        *      Call device private open method
-        */
-       set_bit(__LINK_STATE_START, &dev->state);
-       if (dev->open) {
-               ret = dev->open(dev);
-               if (ret)
-                       clear_bit(__LINK_STATE_START, &dev->state);
-       }
-
-       /*
-        *      If it went open OK then:
-        */
-
-       if (!ret) {
-               /*
-                *      Set the flags.
-                */
-               dev->flags |= IFF_UP;
-
-               /*
-                *      Initialize multicasting status
-                */
-               dev_mc_upload(dev);
-
-               /*
-                *      Wakeup transmit queue engine
-                */
-               dev_activate(dev);
-
-               /*
-                *      ... and announce new interface.
-                */
-               raw_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
-       }
-       return ret;
-}
-
-/**
- *     dev_close - shutdown an interface.
- *     @dev: device to shutdown
- *
- *     This function moves an active device into down state. A
- *     %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
- *     is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
- *     chain.
- */
-int dev_close(struct net_device *dev)
-{
-       if (!(dev->flags & IFF_UP))
-               return 0;
-
-       /*
-        *      Tell people we are going down, so that they can
-        *      prepare for death while the device is still operating.
-        */
-       raw_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);
-
-       dev_deactivate(dev);
-
-       clear_bit(__LINK_STATE_START, &dev->state);
-
-       /* Synchronize to the scheduled poll. We cannot touch the poll list,
-        * it can even be on a different cpu. So just clear netif_running(),
-        * and wait until the poll really happens. Actually, the best place
-        * for this is inside dev->stop() after the device has stopped its irq
-        * engine, but this requires more changes in devices. */
-
-       smp_mb__after_clear_bit(); /* Commit netif_running(). */
-       while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
-               /* No hurry. */
-               msleep(1);
-       }
-
-       /*
-        *      Call the device specific close. This cannot fail.
-        *      Only if device is UP
-        *
-        *      We allow it to be called even after a DETACH hot-plug
-        *      event.
-        */
-       if (dev->stop)
-               dev->stop(dev);
-
-       /*
-        *      Device is now down.
-        */
-
-       dev->flags &= ~IFF_UP;
-
-       /*
-        * Tell people we are down
-        */
-       raw_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
-
-       return 0;
-}
-
-
-/*
- *     Device change register/unregister. These are not inline or static
- *     as we export them to the world.
- */
-
-/**
- *     register_netdevice_notifier - register a network notifier block
- *     @nb: notifier
- *
- *     Register a notifier to be called when network device events occur.
- *     The notifier passed is linked into the kernel structures and must
- *     not be reused until it has been unregistered. A negative errno code
- *     is returned on a failure.
- *
- *     When registered, all registration and up events are replayed
- *     to the new notifier to allow it to have a race-free
- *     view of the network device list.
- */
-
-int register_netdevice_notifier(struct notifier_block *nb)
-{
-       struct net_device *dev;
-       int err;
-
-       rtnl_lock();
-       err = raw_notifier_chain_register(&netdev_chain, nb);
-       if (!err) {
-               for (dev = dev_base; dev; dev = dev->next) {
-                       nb->notifier_call(nb, NETDEV_REGISTER, dev);
-
-                       if (dev->flags & IFF_UP) 
-                               nb->notifier_call(nb, NETDEV_UP, dev);
-               }
-       }
-       rtnl_unlock();
-       return err;
-}
-
-/**
- *     unregister_netdevice_notifier - unregister a network notifier block
- *     @nb: notifier
- *
- *     Unregister a notifier previously registered by
- *     register_netdevice_notifier(). The notifier is unlinked from the
- *     kernel structures and may then be reused. A negative errno code
- *     is returned on a failure.
- */
-
-int unregister_netdevice_notifier(struct notifier_block *nb)
-{
-       int err;
-
-       rtnl_lock();
-       err = raw_notifier_chain_unregister(&netdev_chain, nb);
-       rtnl_unlock();
-       return err;
-}
-
-/**
- *     call_netdevice_notifiers - call all network notifier blocks
- *      @val: value passed unmodified to notifier function
- *      @v:   pointer passed unmodified to notifier function
- *
- *     Call all network notifier blocks.  Parameters and return value
- *     are as for raw_notifier_call_chain().
- */
-
-int call_netdevice_notifiers(unsigned long val, void *v)
-{
-       return raw_notifier_call_chain(&netdev_chain, val, v);
-}
-
-/* When > 0 there are consumers of rx skb time stamps */
-static atomic_t netstamp_needed = ATOMIC_INIT(0);
-
-void net_enable_timestamp(void)
-{
-       atomic_inc(&netstamp_needed);
-}
-
-void net_disable_timestamp(void)
-{
-       atomic_dec(&netstamp_needed);
-}
-
-void __net_timestamp(struct sk_buff *skb)
-{
-       struct timeval tv;
-
-       do_gettimeofday(&tv);
-       skb_set_timestamp(skb, &tv);
-}
-EXPORT_SYMBOL(__net_timestamp);
-
-static inline void net_timestamp(struct sk_buff *skb)
-{
-       if (atomic_read(&netstamp_needed))
-               __net_timestamp(skb);
-       else {
-               skb->tstamp.off_sec = 0;
-               skb->tstamp.off_usec = 0;
-       }
-}
-
-/*
- *     Support routine. Sends outgoing frames to any network
- *     taps currently in use.
- */
-
-static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct packet_type *ptype;
-
-       net_timestamp(skb);
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(ptype, &ptype_all, list) {
-               /* Never send packets back to the socket
-                * they originated from - MvS (miquels@drinkel.ow.org)
-                */
-               if ((ptype->dev == dev || !ptype->dev) &&
-                   (ptype->af_packet_priv == NULL ||
-                    (struct sock *)ptype->af_packet_priv != skb->sk)) {
-                       struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
-                       if (!skb2)
-                               break;
-
-                       /* skb->nh should be correctly
-                          set by the sender, so that the second statement is
-                          just protection against buggy protocols.
-                        */
-                       skb2->mac.raw = skb2->data;
-
-                       if (skb2->nh.raw < skb2->data ||
-                           skb2->nh.raw > skb2->tail) {
-                               if (net_ratelimit())
-                                       printk(KERN_CRIT "protocol %04x is "
-                                              "buggy, dev %s\n",
-                                              skb2->protocol, dev->name);
-                               skb2->nh.raw = skb2->data;
-                       }
-
-                       skb2->h.raw = skb2->nh.raw;
-                       skb2->pkt_type = PACKET_OUTGOING;
-                       ptype->func(skb2, skb->dev, ptype, skb->dev);
-               }
-       }
-       rcu_read_unlock();
-}
-
-
-void __netif_schedule(struct net_device *dev)
-{
-       if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
-               unsigned long flags;
-               struct softnet_data *sd;
-
-               local_irq_save(flags);
-               sd = &__get_cpu_var(softnet_data);
-               dev->next_sched = sd->output_queue;
-               sd->output_queue = dev;
-               raise_softirq_irqoff(NET_TX_SOFTIRQ);
-               local_irq_restore(flags);
-       }
-}
-EXPORT_SYMBOL(__netif_schedule);
-
-void __netif_rx_schedule(struct net_device *dev)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       dev_hold(dev);
-       list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
-       if (dev->quota < 0)
-               dev->quota += dev->weight;
-       else
-               dev->quota = dev->weight;
-       __raise_softirq_irqoff(NET_RX_SOFTIRQ);
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL(__netif_rx_schedule);
-
-void dev_kfree_skb_any(struct sk_buff *skb)
-{
-       if (in_irq() || irqs_disabled())
-               dev_kfree_skb_irq(skb);
-       else
-               dev_kfree_skb(skb);
-}
-EXPORT_SYMBOL(dev_kfree_skb_any);
-
-
-/* Hot-plugging. */
-void netif_device_detach(struct net_device *dev)
-{
-       if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
-           netif_running(dev)) {
-               netif_stop_queue(dev);
-       }
-}
-EXPORT_SYMBOL(netif_device_detach);
-
-void netif_device_attach(struct net_device *dev)
-{
-       if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
-           netif_running(dev)) {
-               netif_wake_queue(dev);
-               __netdev_watchdog_up(dev);
-       }
-}
-EXPORT_SYMBOL(netif_device_attach);
-
-
-/*
- * Invalidate hardware checksum when packet is to be mangled, and
- * complete checksum manually on outgoing path.
- */
-int skb_checksum_help(struct sk_buff *skb, int inward)
-{
-       unsigned int csum;
-       int ret = 0, offset = skb->h.raw - skb->data;
-
-       if (inward) {
-               skb->ip_summed = CHECKSUM_NONE;
-               goto out;
-       }
-
-       if (skb_cloned(skb)) {
-               ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-               if (ret)
-                       goto out;
-       }
-
-       BUG_ON(offset > (int)skb->len);
-       csum = skb_checksum(skb, offset, skb->len-offset, 0);
-
-       offset = skb->tail - skb->h.raw;
-       BUG_ON(offset <= 0);
-       BUG_ON(skb->csum + 2 > offset);
-
-       *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum);
-       skb->ip_summed = CHECKSUM_NONE;
-out:   
-       return ret;
-}
-
-/**
- *     skb_gso_segment - Perform segmentation on skb.
- *     @skb: buffer to segment
- *     @features: features for the output path (see dev->features)
- *
- *     This function segments the given skb and returns a list of segments.
- *
- *     It may return NULL if the skb requires no segmentation.  This is
- *     only possible when GSO is used for verifying header integrity.
- */
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
-{
-       struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
-       struct packet_type *ptype;
-       int type = skb->protocol;
-
-       BUG_ON(skb_shinfo(skb)->frag_list);
-       BUG_ON(skb->ip_summed != CHECKSUM_HW);
-
-       skb->mac.raw = skb->data;
-       skb->mac_len = skb->nh.raw - skb->data;
-       __skb_pull(skb, skb->mac_len);
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
-               if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
-                       segs = ptype->gso_segment(skb, features);
-                       break;
-               }
-       }
-       rcu_read_unlock();
-
-       __skb_push(skb, skb->data - skb->mac.raw);
-
-       return segs;
-}
-
-EXPORT_SYMBOL(skb_gso_segment);
-
-/* Take action when hardware reception checksum errors are detected. */
-#ifdef CONFIG_BUG
-void netdev_rx_csum_fault(struct net_device *dev)
-{
-       if (net_ratelimit()) {
-               printk(KERN_ERR "%s: hw csum failure.\n", 
-                       dev ? dev->name : "<unknown>");
-               dump_stack();
-       }
-}
-EXPORT_SYMBOL(netdev_rx_csum_fault);
-#endif
-
-#ifdef CONFIG_HIGHMEM
-/* Actually, we should eliminate this check as soon as we know that:
- * 1. An IOMMU is present and allows mapping all the memory.
- * 2. No high memory really exists on this machine.
- */
-
-static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
-{
-       int i;
-
-       if (dev->features & NETIF_F_HIGHDMA)
-               return 0;
-
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-               if (PageHighMem(skb_shinfo(skb)->frags[i].page))
-                       return 1;
-
-       return 0;
-}
-#else
-#define illegal_highdma(dev, skb)      (0)
-#endif
-
-struct dev_gso_cb {
-       void (*destructor)(struct sk_buff *skb);
-};
-
-#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
-
-static void dev_gso_skb_destructor(struct sk_buff *skb)
-{
-       struct dev_gso_cb *cb;
-
-       do {
-               struct sk_buff *nskb = skb->next;
-
-               skb->next = nskb->next;
-               nskb->next = NULL;
-               kfree_skb(nskb);
-       } while (skb->next);
-
-       cb = DEV_GSO_CB(skb);
-       if (cb->destructor)
-               cb->destructor(skb);
-}
-
-/**
- *     dev_gso_segment - Perform emulated hardware segmentation on skb.
- *     @skb: buffer to segment
- *
- *     This function segments the given skb and stores the list of segments
- *     in skb->next.
- */
-static int dev_gso_segment(struct sk_buff *skb)
-{
-       struct net_device *dev = skb->dev;
-       struct sk_buff *segs;
-       int features = dev->features & ~(illegal_highdma(dev, skb) ?
-                                        NETIF_F_SG : 0);
-
-       segs = skb_gso_segment(skb, features);
-
-       /* Verifying header integrity only. */
-       if (!segs)
-               return 0;
-
-       if (unlikely(IS_ERR(segs)))
-               return PTR_ERR(segs);
-
-       skb->next = segs;
-       DEV_GSO_CB(skb)->destructor = skb->destructor;
-       skb->destructor = dev_gso_skb_destructor;
-
-       return 0;
-}
-
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       if (likely(!skb->next)) {
-               if (netdev_nit)
-                       dev_queue_xmit_nit(skb, dev);
-
-               if (netif_needs_gso(dev, skb)) {
-                       if (unlikely(dev_gso_segment(skb)))
-                               goto out_kfree_skb;
-                       if (skb->next)
-                               goto gso;
-               }
-
-               return dev->hard_start_xmit(skb, dev);
-       }
-
-gso:
-       do {
-               struct sk_buff *nskb = skb->next;
-               int rc;
-
-               skb->next = nskb->next;
-               nskb->next = NULL;
-               rc = dev->hard_start_xmit(nskb, dev);
-               if (unlikely(rc)) {
-                       nskb->next = skb->next;
-                       skb->next = nskb;
-                       return rc;
-               }
-               if (unlikely(netif_queue_stopped(dev) && skb->next))
-                       return NETDEV_TX_BUSY;
-       } while (skb->next);
-       
-       skb->destructor = DEV_GSO_CB(skb)->destructor;
-
-out_kfree_skb:
-       kfree_skb(skb);
-       return 0;
-}
-
-#define HARD_TX_LOCK(dev, cpu) {                       \
-       if ((dev->features & NETIF_F_LLTX) == 0) {      \
-               netif_tx_lock(dev);                     \
-       }                                               \
-}
-
-#define HARD_TX_UNLOCK(dev) {                          \
-       if ((dev->features & NETIF_F_LLTX) == 0) {      \
-               netif_tx_unlock(dev);                   \
-       }                                               \
-}
-
-#ifdef CONFIG_XEN
-inline int skb_checksum_setup(struct sk_buff *skb)
-{
-       if (skb->proto_csum_blank) {
-               if (skb->protocol != htons(ETH_P_IP))
-                       goto out;
-               skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
-               if (skb->h.raw >= skb->tail)
-                       goto out;
-               switch (skb->nh.iph->protocol) {
-               case IPPROTO_TCP:
-                       skb->csum = offsetof(struct tcphdr, check);
-                       break;
-               case IPPROTO_UDP:
-                       skb->csum = offsetof(struct udphdr, check);
-                       break;
-               default:
-                       if (net_ratelimit())
-                               printk(KERN_ERR "Attempting to checksum a non-"
-                                      "TCP/UDP packet, dropping a protocol"
-                                      " %d packet", skb->nh.iph->protocol);
-                       goto out;
-               }
-               if ((skb->h.raw + skb->csum + 2) > skb->tail)
-                       goto out;
-               skb->ip_summed = CHECKSUM_HW;
-               skb->proto_csum_blank = 0;
-       }
-       return 0;
-out:
-       return -EPROTO;
-}
-#else
-inline int skb_checksum_setup(struct sk_buff *skb) { return 0; }
-#endif
-
-
-/**
- *     dev_queue_xmit - transmit a buffer
- *     @skb: buffer to transmit
- *
- *     Queue a buffer for transmission to a network device. The caller must
- *     have set the device and priority and built the buffer before calling
- *     this function. The function can be called from an interrupt.
- *
- *     A negative errno code is returned on a failure. A success does not
- *     guarantee the frame will be transmitted as it may be dropped due
- *     to congestion or traffic shaping.
- *
- * -----------------------------------------------------------------------------------
- *      I notice this method can also return errors from the queue disciplines,
- *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
- *      be positive.
- *
- *      Regardless of the return value, the skb is consumed, so it is currently
- *      difficult to retry a send to this method.  (You can bump the ref count
- *      before sending to hold a reference for retry if you are careful.)
- *
- *      When calling this method, interrupts MUST be enabled.  This is because
- *      the BH enable code must have IRQs enabled so that it will not deadlock.
- *          --BLG
- */
-
-int dev_queue_xmit(struct sk_buff *skb)
-{
-       struct net_device *dev = skb->dev;
-       struct Qdisc *q;
-       int rc = -ENOMEM;
-
-       /* If a checksum-deferred packet is forwarded to a device that needs a
-        * checksum, correct the pointers and force checksumming.
-        */
-       if (skb_checksum_setup(skb))
-               goto out_kfree_skb;
-
-       /* GSO will handle the following emulations directly. */
-       if (netif_needs_gso(dev, skb))
-               goto gso;
-
-       if (skb_shinfo(skb)->frag_list &&
-           !(dev->features & NETIF_F_FRAGLIST) &&
-           __skb_linearize(skb))
-               goto out_kfree_skb;
-
-       /* Fragmented skb is linearized if device does not support SG,
-        * or if at least one of fragments is in highmem and device
-        * does not support DMA from it.
-        */
-       if (skb_shinfo(skb)->nr_frags &&
-           (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
-           __skb_linearize(skb))
-               goto out_kfree_skb;
-
-       /* If packet is not checksummed and device does not support
-        * checksumming for this protocol, complete checksumming here.
-        */
-       if (skb->ip_summed == CHECKSUM_HW &&
-           (!(dev->features & NETIF_F_GEN_CSUM) &&
-            (!(dev->features & NETIF_F_IP_CSUM) ||
-             skb->protocol != htons(ETH_P_IP))))
-               if (skb_checksum_help(skb, 0))
-                       goto out_kfree_skb;
-
-gso:
-       spin_lock_prefetch(&dev->queue_lock);
-
-       /* Disable soft irqs for various locks below. Also 
-        * stops preemption for RCU. 
-        */
-       rcu_read_lock_bh(); 
-
       /* Updates of qdisc are serialized by queue_lock.
        * The struct Qdisc which is pointed to by qdisc is now an
        * RCU structure - it may be accessed without acquiring
        * a lock (but the structure may be stale).  The freeing of the
        * qdisc will be deferred until it is known that there are no
        * more references to it.
        *
        * If the qdisc has an enqueue function, we still need to
        * hold the queue_lock before calling it, since queue_lock
        * also serializes access to the device queue.
        */
-
-       q = rcu_dereference(dev->qdisc);
-#ifdef CONFIG_NET_CLS_ACT
-       skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
-#endif
-       if (q->enqueue) {
-               /* Grab device queue */
-               spin_lock(&dev->queue_lock);
-
-               rc = q->enqueue(skb, q);
-
-               qdisc_run(dev);
-
-               spin_unlock(&dev->queue_lock);
-               rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
-               goto out;
-       }
-
       /* The device has no queue. Common case for software devices:
          loopback, all sorts of tunnels...

          Really, it is unlikely that netif_tx_lock protection is necessary
          here.  (e.g. loopback and IP tunnels are clean, ignoring statistics
          counters.)
          However, it is possible that they rely on the protection
          we provide here.

          Check this and remove the lock; it is not deadlock-prone.
          Or remove the noqueue qdisc entirely; that is even simpler 8)
        */
-       if (dev->flags & IFF_UP) {
-               int cpu = smp_processor_id(); /* ok because BHs are off */
-
-               if (dev->xmit_lock_owner != cpu) {
-
-                       HARD_TX_LOCK(dev, cpu);
-
-                       if (!netif_queue_stopped(dev)) {
-                               rc = 0;
-                               if (!dev_hard_start_xmit(skb, dev)) {
-                                       HARD_TX_UNLOCK(dev);
-                                       goto out;
-                               }
-                       }
-                       HARD_TX_UNLOCK(dev);
-                       if (net_ratelimit())
-                               printk(KERN_CRIT "Virtual device %s asks to "
-                                      "queue packet!\n", dev->name);
-               } else {
                       /* Recursion detected! It is, unfortunately,
                        * possible. */
-                       if (net_ratelimit())
-                               printk(KERN_CRIT "Dead loop on virtual device "
-                                      "%s, fix it urgently!\n", dev->name);
-               }
-       }
-
-       rc = -ENETDOWN;
-       rcu_read_unlock_bh();
-
-out_kfree_skb:
-       kfree_skb(skb);
-       return rc;
-out:
-       rcu_read_unlock_bh();
-       return rc;
-}
-
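-/* A minimal usage sketch (not part of this file) of the contract described
- * above: the caller builds the skb, sets skb->dev and the protocol, and
- * gives up ownership of the skb on the call regardless of the return code.
- * The function "example_xmit" and its callers are hypothetical.
- */
-static int example_xmit(struct net_device *dev, const void *payload, int len)
-{
-       struct sk_buff *skb;
-
-       skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
-       if (!skb)
-               return -ENOMEM;
-       skb_reserve(skb, LL_RESERVED_SPACE(dev));  /* room for link headers */
-       memcpy(skb_put(skb, len), payload, len);
-       skb->dev = dev;
-       skb->protocol = htons(ETH_P_IP);
-       /* IRQs must be enabled here; the skb is consumed even on error. */
-       return dev_queue_xmit(skb);
-}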
-
-/*=======================================================================
-                       Receiver routines
-  =======================================================================*/
-
-int netdev_max_backlog = 1000;
-int netdev_budget = 300;
-int weight_p = 64;            /* old backlog weight */
-
-DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
-
-
-/**
- *     netif_rx        -       post buffer to the network code
- *     @skb: buffer to post
- *
- *     This function receives a packet from a device driver and queues it for
- *     the upper (protocol) levels to process.  It always succeeds. The buffer
- *     may be dropped during processing for congestion control or by the
- *     protocol layers.
- *
- *     return values:
- *     NET_RX_SUCCESS  (no congestion)
- *     NET_RX_CN_LOW   (low congestion)
- *     NET_RX_CN_MOD   (moderate congestion)
- *     NET_RX_CN_HIGH  (high congestion)
- *     NET_RX_DROP     (packet was dropped)
- *
- */
-
-int netif_rx(struct sk_buff *skb)
-{
-       struct softnet_data *queue;
-       unsigned long flags;
-
-       /* if netpoll wants it, pretend we never saw it */
-       if (netpoll_rx(skb))
-               return NET_RX_DROP;
-
-       if (!skb->tstamp.off_sec)
-               net_timestamp(skb);
-
       /*
        * The code is arranged so that the path is shortest when
        * the CPU is congested but still operating.
        */
-       local_irq_save(flags);
-       queue = &__get_cpu_var(softnet_data);
-
-       __get_cpu_var(netdev_rx_stat).total++;
-       if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
-               if (queue->input_pkt_queue.qlen) {
-enqueue:
-                       dev_hold(skb->dev);
-                       __skb_queue_tail(&queue->input_pkt_queue, skb);
-                       local_irq_restore(flags);
-                       return NET_RX_SUCCESS;
-               }
-
-               netif_rx_schedule(&queue->backlog_dev);
-               goto enqueue;
-       }
-
-       __get_cpu_var(netdev_rx_stat).dropped++;
-       local_irq_restore(flags);
-
-       kfree_skb(skb);
-       return NET_RX_DROP;
-}
-
-int netif_rx_ni(struct sk_buff *skb)
-{
-       int err;
-
-       preempt_disable();
-       err = netif_rx(skb);
-       if (local_softirq_pending())
-               do_softirq();
-       preempt_enable();
-
-       return err;
-}
-
-EXPORT_SYMBOL(netif_rx_ni);
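-
-/* A hedged sketch of the usual driver-side call described above: a
- * receive interrupt wraps the frame in an skb, fills in dev and protocol,
- * and posts it with netif_rx().  "example_rx" is hypothetical.
- */
-static void example_rx(struct net_device *dev, const void *data, int len)
-{
-       struct sk_buff *skb = dev_alloc_skb(len + NET_IP_ALIGN);
-
-       if (!skb)
-               return;                         /* out of memory: drop */
-       skb_reserve(skb, NET_IP_ALIGN);         /* align the IP header */
-       memcpy(skb_put(skb, len), data, len);
-       skb->protocol = eth_type_trans(skb, dev);  /* also sets skb->dev */
-       netif_rx(skb);                          /* returns a NET_RX_* hint */
-}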
-
-static inline struct net_device *skb_bond(struct sk_buff *skb)
-{
-       struct net_device *dev = skb->dev;
-
-       if (dev->master) {
-               /*
-                * On bonding slaves other than the currently active
-                * slave, suppress duplicates except for 802.3ad
-                * ETH_P_SLOW and alb non-mcast/bcast.
-                */
-               if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
-                       if (dev->master->priv_flags & IFF_MASTER_ALB) {
-                               if (skb->pkt_type != PACKET_BROADCAST &&
-                                   skb->pkt_type != PACKET_MULTICAST)
-                                       goto keep;
-                       }
-
-                       if (dev->master->priv_flags & IFF_MASTER_8023AD &&
-                           skb->protocol == __constant_htons(ETH_P_SLOW))
-                               goto keep;
-               
-                       kfree_skb(skb);
-                       return NULL;
-               }
-keep:
-               skb->dev = dev->master;
-       }
-
-       return dev;
-}
-
-static void net_tx_action(struct softirq_action *h)
-{
-       struct softnet_data *sd = &__get_cpu_var(softnet_data);
-
-       if (sd->completion_queue) {
-               struct sk_buff *clist;
-
-               local_irq_disable();
-               clist = sd->completion_queue;
-               sd->completion_queue = NULL;
-               local_irq_enable();
-
-               while (clist) {
-                       struct sk_buff *skb = clist;
-                       clist = clist->next;
-
-                       BUG_TRAP(!atomic_read(&skb->users));
-                       __kfree_skb(skb);
-               }
-       }
-
-       if (sd->output_queue) {
-               struct net_device *head;
-
-               local_irq_disable();
-               head = sd->output_queue;
-               sd->output_queue = NULL;
-               local_irq_enable();
-
-               while (head) {
-                       struct net_device *dev = head;
-                       head = head->next_sched;
-
-                       smp_mb__before_clear_bit();
-                       clear_bit(__LINK_STATE_SCHED, &dev->state);
-
-                       if (spin_trylock(&dev->queue_lock)) {
-                               qdisc_run(dev);
-                               spin_unlock(&dev->queue_lock);
-                       } else {
-                               netif_schedule(dev);
-                       }
-               }
-       }
-}
-
-static __inline__ int deliver_skb(struct sk_buff *skb,
-                                 struct packet_type *pt_prev,
-                                 struct net_device *orig_dev)
-{
-       atomic_inc(&skb->users);
-       return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
-}
-
-#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
-int (*br_handle_frame_hook)(struct net_bridge_port *p, struct sk_buff **pskb);
-struct net_bridge;
-struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
-                                               unsigned char *addr);
-void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
-
-static __inline__ int handle_bridge(struct sk_buff **pskb,
-                                   struct packet_type **pt_prev, int *ret,
-                                   struct net_device *orig_dev)
-{
-       struct net_bridge_port *port;
-
-       if ((*pskb)->pkt_type == PACKET_LOOPBACK ||
-           (port = rcu_dereference((*pskb)->dev->br_port)) == NULL)
-               return 0;
-
-       if (*pt_prev) {
-               *ret = deliver_skb(*pskb, *pt_prev, orig_dev);
-               *pt_prev = NULL;
-       } 
-       
-       return br_handle_frame_hook(port, pskb);
-}
-#else
-#define handle_bridge(skb, pt_prev, ret, orig_dev)     (0)
-#endif
-
-#ifdef CONFIG_NET_CLS_ACT
-/* TODO: Maybe we should just force sch_ingress to be compiled in
- * whenever CONFIG_NET_CLS_ACT is?  Otherwise we execute some useless
- * instructions (a compare and two extra stores) when ingress is not
- * configured but CONFIG_NET_CLS_ACT is.
- * NOTE: This doesn't remove any functionality; if you don't have
- * the ingress scheduler, you just can't add policies on ingress.
- */
-static int ing_filter(struct sk_buff *skb) 
-{
-       struct Qdisc *q;
-       struct net_device *dev = skb->dev;
-       int result = TC_ACT_OK;
-       
-       if (dev->qdisc_ingress) {
-               __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
-               if (MAX_RED_LOOP < ttl++) {
                       printk("Redir loop detected, dropping packet (%s->%s)\n",
                               skb->input_dev->name, skb->dev->name);
-                       return TC_ACT_SHOT;
-               }
-
-               skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl);
-
-               skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
-
-               spin_lock(&dev->ingress_lock);
-               if ((q = dev->qdisc_ingress) != NULL)
-                       result = q->enqueue(skb, q);
-               spin_unlock(&dev->ingress_lock);
-
-       }
-
-       return result;
-}
-#endif
-
-int netif_receive_skb(struct sk_buff *skb)
-{
-       struct packet_type *ptype, *pt_prev;
-       struct net_device *orig_dev;
-       int ret = NET_RX_DROP;
-       unsigned short type;
-
-       /* if we've gotten here through NAPI, check netpoll */
-       if (skb->dev->poll && netpoll_rx(skb))
-               return NET_RX_DROP;
-
-       if (!skb->tstamp.off_sec)
-               net_timestamp(skb);
-
-       if (!skb->input_dev)
-               skb->input_dev = skb->dev;
-
-       orig_dev = skb_bond(skb);
-
-       if (!orig_dev)
-               return NET_RX_DROP;
-
-       __get_cpu_var(netdev_rx_stat).total++;
-
-       skb->h.raw = skb->nh.raw = skb->data;
-       skb->mac_len = skb->nh.raw - skb->mac.raw;
-
-       pt_prev = NULL;
-
-       rcu_read_lock();
-
-#ifdef CONFIG_NET_CLS_ACT
-       if (skb->tc_verd & TC_NCLS) {
-               skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
-               goto ncls;
-       }
-#endif
-
-#ifdef CONFIG_XEN
-       switch (skb->ip_summed) {
-       case CHECKSUM_UNNECESSARY:
-               skb->proto_data_valid = 1;
-               break;
-       case CHECKSUM_HW:
-               /* XXX Implement me. */
-       default:
-               skb->proto_data_valid = 0;
-               break;
-       }
-#endif
-
-       list_for_each_entry_rcu(ptype, &ptype_all, list) {
-               if (!ptype->dev || ptype->dev == skb->dev) {
-                       if (pt_prev) 
-                               ret = deliver_skb(skb, pt_prev, orig_dev);
-                       pt_prev = ptype;
-               }
-       }
-
-#ifdef CONFIG_NET_CLS_ACT
-       if (pt_prev) {
-               ret = deliver_skb(skb, pt_prev, orig_dev);
               pt_prev = NULL; /* no one else should process this after */
-       } else {
-               skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
-       }
-
-       ret = ing_filter(skb);
-
-       if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) {
-               kfree_skb(skb);
-               goto out;
-       }
-
-       skb->tc_verd = 0;
-ncls:
-#endif
-
-       handle_diverter(skb);
-
-       if (handle_bridge(&skb, &pt_prev, &ret, orig_dev))
-               goto out;
-
-       type = skb->protocol;
-       list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
-               if (ptype->type == type &&
-                   (!ptype->dev || ptype->dev == skb->dev)) {
-                       if (pt_prev) 
-                               ret = deliver_skb(skb, pt_prev, orig_dev);
-                       pt_prev = ptype;
-               }
-       }
-
-       if (pt_prev) {
-               ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
-       } else {
-               kfree_skb(skb);
               /* Jamal, now you will not be able to escape explaining
                * to me how you were going to use this. :-)
                */
-               ret = NET_RX_DROP;
-       }
-
-out:
-       rcu_read_unlock();
-       return ret;
-}
-
-static int process_backlog(struct net_device *backlog_dev, int *budget)
-{
-       int work = 0;
-       int quota = min(backlog_dev->quota, *budget);
-       struct softnet_data *queue = &__get_cpu_var(softnet_data);
-       unsigned long start_time = jiffies;
-
-       backlog_dev->weight = weight_p;
-       for (;;) {
-               struct sk_buff *skb;
-               struct net_device *dev;
-
-               local_irq_disable();
-               skb = __skb_dequeue(&queue->input_pkt_queue);
-               if (!skb)
-                       goto job_done;
-               local_irq_enable();
-
-               dev = skb->dev;
-
-               netif_receive_skb(skb);
-
-               dev_put(dev);
-
-               work++;
-
-               if (work >= quota || jiffies - start_time > 1)
-                       break;
-
-       }
-
-       backlog_dev->quota -= work;
-       *budget -= work;
-       return -1;
-
-job_done:
-       backlog_dev->quota -= work;
-       *budget -= work;
-
-       list_del(&backlog_dev->poll_list);
-       smp_mb__before_clear_bit();
-       netif_poll_enable(backlog_dev);
-
-       local_irq_enable();
-       return 0;
-}
-
-static void net_rx_action(struct softirq_action *h)
-{
-       struct softnet_data *queue = &__get_cpu_var(softnet_data);
-       unsigned long start_time = jiffies;
-       int budget = netdev_budget;
-       void *have;
-
-       local_irq_disable();
-
-       while (!list_empty(&queue->poll_list)) {
-               struct net_device *dev;
-
-               if (budget <= 0 || jiffies - start_time > 1)
-                       goto softnet_break;
-
-               local_irq_enable();
-
-               dev = list_entry(queue->poll_list.next,
-                                struct net_device, poll_list);
-               have = netpoll_poll_lock(dev);
-
-               if (dev->quota <= 0 || dev->poll(dev, &budget)) {
-                       netpoll_poll_unlock(have);
-                       local_irq_disable();
-                       list_move_tail(&dev->poll_list, &queue->poll_list);
-                       if (dev->quota < 0)
-                               dev->quota += dev->weight;
-                       else
-                               dev->quota = dev->weight;
-               } else {
-                       netpoll_poll_unlock(have);
-                       dev_put(dev);
-                       local_irq_disable();
-               }
-       }
-out:
-       local_irq_enable();
-       return;
-
-softnet_break:
-       __get_cpu_var(netdev_rx_stat).time_squeeze++;
-       __raise_softirq_irqoff(NET_RX_SOFTIRQ);
-       goto out;
-}
-
-static gifconf_func_t * gifconf_list [NPROTO];
-
-/**
- *     register_gifconf        -       register a SIOCGIF handler
- *     @family: Address family
- *     @gifconf: Function handler
- *
- *     Register protocol dependent address dumping routines. The handler
- *     that is passed must not be freed or reused until it has been replaced
- *     by another handler.
- */
-int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
-{
-       if (family >= NPROTO)
-               return -EINVAL;
-       gifconf_list[family] = gifconf;
-       return 0;
-}
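-
-/* A minimal sketch of the registration contract above, with hypothetical
- * names; the in-tree IPv4 handler is inet_gifconf().  When called with a
- * NULL buffer the handler only reports how many bytes it would need.
- */
-static int example_gifconf(struct net_device *dev, char __user *buf, int len)
-{
-       /* Write struct ifreq entries for dev into buf (when non-NULL)
-        * and return the number of bytes consumed, or a negative errno.
-        */
-       return 0;
-}
-
-static int __init example_gifconf_init(void)
-{
-       return register_gifconf(PF_INET, example_gifconf);
-}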
-
-
-/*
- *     Map an interface index to its name (SIOCGIFNAME)
- */
-
-/*
- *     We need this ioctl for efficient implementation of the
- *     if_indextoname() function required by the IPv6 API.  Without
- *     it, we would have to search all the interfaces to find a
- *     match.  --pb
- */
-
-static int dev_ifname(struct ifreq __user *arg)
-{
-       struct net_device *dev;
-       struct ifreq ifr;
-
-       /*
-        *      Fetch the caller's info block.
-        */
-
-       if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
-               return -EFAULT;
-
-       read_lock(&dev_base_lock);
-       dev = __dev_get_by_index(ifr.ifr_ifindex);
-       if (!dev) {
-               read_unlock(&dev_base_lock);
-               return -ENODEV;
-       }
-
-       strcpy(ifr.ifr_name, dev->name);
-       read_unlock(&dev_base_lock);
-
-       if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
-               return -EFAULT;
-       return 0;
-}
-
-/*
- *     Perform a SIOCGIFCONF call. This structure will change
- *     size eventually, and there is nothing I can do about it.
- *     Thus we will need a 'compatibility mode'.
- */
-
-static int dev_ifconf(char __user *arg)
-{
-       struct ifconf ifc;
-       struct net_device *dev;
-       char __user *pos;
-       int len;
-       int total;
-       int i;
-
-       /*
-        *      Fetch the caller's info block.
-        */
-
-       if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
-               return -EFAULT;
-
-       pos = ifc.ifc_buf;
-       len = ifc.ifc_len;
-
-       /*
-        *      Loop over the interfaces, and write an info block for each.
-        */
-
-       total = 0;
-       for (dev = dev_base; dev; dev = dev->next) {
-               if (vx_flags(VXF_HIDE_NETIF, 0) &&
-                       !dev_in_nx_info(dev, current->nx_info))
-                       continue;
-               for (i = 0; i < NPROTO; i++) {
-                       if (gifconf_list[i]) {
-                               int done;
-                               if (!pos)
-                                       done = gifconf_list[i](dev, NULL, 0);
-                               else
-                                       done = gifconf_list[i](dev, pos + total,
-                                                              len - total);
-                               if (done < 0)
-                                       return -EFAULT;
-                               total += done;
-                       }
-               }
-       }
-
-       /*
-        *      All done.  Write the updated control block back to the caller.
-        */
-       ifc.ifc_len = total;
-
-       /*
-        *      Both BSD and Solaris return 0 here, so we do too.
-        */
-       return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
-}
-
-#ifdef CONFIG_PROC_FS
-/*
- *     This is invoked by the /proc filesystem handler to display a device
- *     in detail.
- */
-static __inline__ struct net_device *dev_get_idx(loff_t pos)
-{
-       struct net_device *dev;
-       loff_t i;
-
-       for (i = 0, dev = dev_base; dev && i < pos; ++i, dev = dev->next);
-
-       return i == pos ? dev : NULL;
-}
-
-void *dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       read_lock(&dev_base_lock);
-       return *pos ? dev_get_idx(*pos - 1) : SEQ_START_TOKEN;
-}
-
-void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       ++*pos;
-       return v == SEQ_START_TOKEN ? dev_base : ((struct net_device *)v)->next;
-}
-
-void dev_seq_stop(struct seq_file *seq, void *v)
-{
-       read_unlock(&dev_base_lock);
-}
-
-static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
-{
-       struct nx_info *nxi = current->nx_info;
-
-       if (vx_flags(VXF_HIDE_NETIF, 0) && !dev_in_nx_info(dev, nxi))
-               return;
-       if (dev->get_stats) {
-               struct net_device_stats *stats = dev->get_stats(dev);
-
-               seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
-                               "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
-                          dev->name, stats->rx_bytes, stats->rx_packets,
-                          stats->rx_errors,
-                          stats->rx_dropped + stats->rx_missed_errors,
-                          stats->rx_fifo_errors,
-                          stats->rx_length_errors + stats->rx_over_errors +
-                            stats->rx_crc_errors + stats->rx_frame_errors,
-                          stats->rx_compressed, stats->multicast,
-                          stats->tx_bytes, stats->tx_packets,
-                          stats->tx_errors, stats->tx_dropped,
-                          stats->tx_fifo_errors, stats->collisions,
-                          stats->tx_carrier_errors +
-                            stats->tx_aborted_errors +
-                            stats->tx_window_errors +
-                            stats->tx_heartbeat_errors,
-                          stats->tx_compressed);
-       } else
-               seq_printf(seq, "%6s: No statistics available.\n", dev->name);
-}
-
-/*
 *     Called from the PROCfs module. This now uses the new arbitrary-sized
 *     /proc/net interface to create /proc/net/dev.
- */
-static int dev_seq_show(struct seq_file *seq, void *v)
-{
-       if (v == SEQ_START_TOKEN)
-               seq_puts(seq, "Inter-|   Receive                            "
-                             "                    |  Transmit\n"
-                             " face |bytes    packets errs drop fifo frame "
-                             "compressed multicast|bytes    packets errs "
-                             "drop fifo colls carrier compressed\n");
-       else
-               dev_seq_printf_stats(seq, v);
-       return 0;
-}
-
-static struct netif_rx_stats *softnet_get_online(loff_t *pos)
-{
-       struct netif_rx_stats *rc = NULL;
-
-       while (*pos < NR_CPUS)
-               if (cpu_online(*pos)) {
-                       rc = &per_cpu(netdev_rx_stat, *pos);
-                       break;
-               } else
-                       ++*pos;
-       return rc;
-}
-
-static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
-{
-       return softnet_get_online(pos);
-}
-
-static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-       ++*pos;
-       return softnet_get_online(pos);
-}
-
-static void softnet_seq_stop(struct seq_file *seq, void *v)
-{
-}
-
-static int softnet_seq_show(struct seq_file *seq, void *v)
-{
-       struct netif_rx_stats *s = v;
-
-       seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
-                  s->total, s->dropped, s->time_squeeze, 0,
-                  0, 0, 0, 0, /* was fastroute */
-                  s->cpu_collision );
-       return 0;
-}
-
-static struct seq_operations dev_seq_ops = {
-       .start = dev_seq_start,
-       .next  = dev_seq_next,
-       .stop  = dev_seq_stop,
-       .show  = dev_seq_show,
-};
-
-static int dev_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &dev_seq_ops);
-}
-
-static struct file_operations dev_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = dev_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-static struct seq_operations softnet_seq_ops = {
-       .start = softnet_seq_start,
-       .next  = softnet_seq_next,
-       .stop  = softnet_seq_stop,
-       .show  = softnet_seq_show,
-};
-
-static int softnet_seq_open(struct inode *inode, struct file *file)
-{
-       return seq_open(file, &softnet_seq_ops);
-}
-
-static struct file_operations softnet_seq_fops = {
-       .owner   = THIS_MODULE,
-       .open    = softnet_seq_open,
-       .read    = seq_read,
-       .llseek  = seq_lseek,
-       .release = seq_release,
-};
-
-#ifdef CONFIG_WIRELESS_EXT
-extern int wireless_proc_init(void);
-#else
-#define wireless_proc_init() 0
-#endif
-
-static int __init dev_proc_init(void)
-{
-       int rc = -ENOMEM;
-
-       if (!proc_net_fops_create("dev", S_IRUGO, &dev_seq_fops))
-               goto out;
-       if (!proc_net_fops_create("softnet_stat", S_IRUGO, &softnet_seq_fops))
-               goto out_dev;
-       if (wireless_proc_init())
-               goto out_softnet;
-       rc = 0;
-out:
-       return rc;
-out_softnet:
-       proc_net_remove("softnet_stat");
-out_dev:
-       proc_net_remove("dev");
-       goto out;
-}
-#else
-#define dev_proc_init() 0
-#endif /* CONFIG_PROC_FS */
-
-
-/**
- *     netdev_set_master       -       set up master/slave pair
- *     @slave: slave device
- *     @master: new master device
- *
- *     Changes the master device of the slave. Pass %NULL to break the
- *     bonding. The caller must hold the RTNL semaphore. On a failure
- *     a negative errno code is returned. On success the reference counts
- *     are adjusted, %RTM_NEWLINK is sent to the routing socket and the
- *     function returns zero.
- */
-int netdev_set_master(struct net_device *slave, struct net_device *master)
-{
-       struct net_device *old = slave->master;
-
-       ASSERT_RTNL();
-
-       if (master) {
-               if (old)
-                       return -EBUSY;
-               dev_hold(master);
-       }
-
-       slave->master = master;
-       
-       synchronize_net();
-
-       if (old)
-               dev_put(old);
-
-       if (master)
-               slave->flags |= IFF_SLAVE;
-       else
-               slave->flags &= ~IFF_SLAVE;
-
-       rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
-       return 0;
-}
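-
-/* Sketch of the intended use (modeled loosely on the bonding driver;
- * "example_enslave" is hypothetical): pair the devices under the RTNL
- * semaphore, and pass %NULL later to break the bond.
- */
-static int example_enslave(struct net_device *bond, struct net_device *slave)
-{
-       int err;
-
-       rtnl_lock();
-       err = netdev_set_master(slave, bond);
-       rtnl_unlock();
-       return err;
-}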
-
-/**
- *     dev_set_promiscuity     - update promiscuity count on a device
- *     @dev: device
- *     @inc: modifier
- *
- *     Add or remove promiscuity from a device. While the count in the device
- *     remains above zero the interface remains promiscuous. Once it hits zero
- *     the device reverts back to normal filtering operation. A negative inc
- *     value is used to drop promiscuity on the device.
- */
-void dev_set_promiscuity(struct net_device *dev, int inc)
-{
-       unsigned short old_flags = dev->flags;
-
-       if ((dev->promiscuity += inc) == 0)
-               dev->flags &= ~IFF_PROMISC;
-       else
-               dev->flags |= IFF_PROMISC;
-       if (dev->flags != old_flags) {
-               dev_mc_upload(dev);
-               printk(KERN_INFO "device %s %s promiscuous mode\n",
-                      dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
-                                                              "left");
-               audit_log(current->audit_context, GFP_ATOMIC,
-                       AUDIT_ANOM_PROMISCUOUS,
-                       "dev=%s prom=%d old_prom=%d auid=%u",
-                       dev->name, (dev->flags & IFF_PROMISC),
-                       (old_flags & IFF_PROMISC),
-                       audit_get_loginuid(current->audit_context)); 
-       }
-}
-
-/**
- *     dev_set_allmulti        - update allmulti count on a device
- *     @dev: device
- *     @inc: modifier
- *
- *     Add or remove reception of all multicast frames to a device. While the
- *     count in the device remains above zero the interface remains listening
- *     to all interfaces. Once it hits zero the device reverts back to normal
- *     filtering operation. A negative @inc value is used to drop the counter
- *     when releasing a resource needing all multicasts.
- */
-
-void dev_set_allmulti(struct net_device *dev, int inc)
-{
-       unsigned short old_flags = dev->flags;
-
-       dev->flags |= IFF_ALLMULTI;
-       if ((dev->allmulti += inc) == 0)
-               dev->flags &= ~IFF_ALLMULTI;
-       if (dev->flags ^ old_flags)
-               dev_mc_upload(dev);
-}
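-
-/* A short sketch of how the two reference counts above compose
- * (hypothetical caller): bump on start, drop on stop, so nested users
- * stack and the flags only clear when the counts return to zero.
- */
-static void example_capture_start(struct net_device *dev)
-{
-       rtnl_lock();
-       dev_set_promiscuity(dev, 1);    /* 0 -> 1: IFF_PROMISC set */
-       dev_set_allmulti(dev, 1);       /* 0 -> 1: IFF_ALLMULTI set */
-       rtnl_unlock();
-}
-
-static void example_capture_stop(struct net_device *dev)
-{
-       rtnl_lock();
-       dev_set_promiscuity(dev, -1);   /* back to 0: IFF_PROMISC cleared */
-       dev_set_allmulti(dev, -1);      /* back to 0: IFF_ALLMULTI cleared */
-       rtnl_unlock();
-}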
-
-unsigned dev_get_flags(const struct net_device *dev)
-{
-       unsigned flags;
-
-       flags = (dev->flags & ~(IFF_PROMISC |
-                               IFF_ALLMULTI |
-                               IFF_RUNNING |
-                               IFF_LOWER_UP |
-                               IFF_DORMANT)) |
-               (dev->gflags & (IFF_PROMISC |
-                               IFF_ALLMULTI));
-
-       if (netif_running(dev)) {
-               if (netif_oper_up(dev))
-                       flags |= IFF_RUNNING;
-               if (netif_carrier_ok(dev))
-                       flags |= IFF_LOWER_UP;
-               if (netif_dormant(dev))
-                       flags |= IFF_DORMANT;
-       }
-
-       return flags;
-}
-
-int dev_change_flags(struct net_device *dev, unsigned flags)
-{
-       int ret;
-       int old_flags = dev->flags;
-
-       /*
-        *      Set the flags on our device.
-        */
-
-       dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
-                              IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
-                              IFF_AUTOMEDIA)) |
-                    (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
-                                   IFF_ALLMULTI));
-
-       /*
-        *      Load in the correct multicast list now the flags have changed.
-        */
-
-       dev_mc_upload(dev);
-
-       /*
        *      Have we downed the interface? We handle IFF_UP ourselves
-        *      according to user attempts to set it, rather than blindly
-        *      setting it.
-        */
-
-       ret = 0;
-       if ((old_flags ^ flags) & IFF_UP) {     /* Bit is different  ? */
-               ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
-
-               if (!ret)
-                       dev_mc_upload(dev);
-       }
-
-       if (dev->flags & IFF_UP &&
-           ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
-                                         IFF_VOLATILE)))
-               raw_notifier_call_chain(&netdev_chain,
-                               NETDEV_CHANGE, dev);
-
-       if ((flags ^ dev->gflags) & IFF_PROMISC) {
-               int inc = (flags & IFF_PROMISC) ? +1 : -1;
-               dev->gflags ^= IFF_PROMISC;
-               dev_set_promiscuity(dev, inc);
-       }
-
       /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
          is important.  Some (broken) drivers set IFF_PROMISC when
          IFF_ALLMULTI is requested, without asking us and without
          reporting it.
        */
-       if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
-               int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
-               dev->gflags ^= IFF_ALLMULTI;
-               dev_set_allmulti(dev, inc);
-       }
-
-       if (old_flags ^ dev->flags)
-               rtmsg_ifinfo(RTM_NEWLINK, dev, old_flags ^ dev->flags);
-
-       return ret;
-}
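-
-/* A hedged sketch of the SIOCSIFFLAGS-style caller: read the visible
- * flags with dev_get_flags(), OR in IFF_UP, and write them back under
- * the RTNL semaphore.  "example_if_up" is hypothetical.
- */
-static int example_if_up(struct net_device *dev)
-{
-       int err;
-
-       rtnl_lock();
-       err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
-       rtnl_unlock();
-       return err;
-}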
-
-int dev_set_mtu(struct net_device *dev, int new_mtu)
-{
-       int err;
-
-       if (new_mtu == dev->mtu)
-               return 0;
-
-       /*      MTU must be positive.    */
-       if (new_mtu < 0)
-               return -EINVAL;
-
-       if (!netif_device_present(dev))
-               return -ENODEV;
-
-       err = 0;
-       if (dev->change_mtu)
-               err = dev->change_mtu(dev, new_mtu);
-       else
-               dev->mtu = new_mtu;
-       if (!err && dev->flags & IFF_UP)
-               raw_notifier_call_chain(&netdev_chain,
-                               NETDEV_CHANGEMTU, dev);
-       return err;
-}
-
-int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
-{
-       int err;
-
-       if (!dev->set_mac_address)
-               return -EOPNOTSUPP;
-       if (sa->sa_family != dev->type)
-               return -EINVAL;
-       if (!netif_device_present(dev))
-               return -ENODEV;
-       err = dev->set_mac_address(dev, sa);
-       if (!err)
-               raw_notifier_call_chain(&netdev_chain,
-                               NETDEV_CHANGEADDR, dev);
-       return err;
-}
-
-/*
- *     Perform the SIOCxIFxxx calls.
- */
-static int dev_ifsioc(struct ifreq *ifr, unsigned int cmd)
-{
-       int err;
-       struct net_device *dev = __dev_get_by_name(ifr->ifr_name);
-
-       if (!dev)
-               return -ENODEV;
-
-       switch (cmd) {
-               case SIOCGIFFLAGS:      /* Get interface flags */
-                       ifr->ifr_flags = dev_get_flags(dev);
-                       return 0;
-
-               case SIOCSIFFLAGS:      /* Set interface flags */
-                       return dev_change_flags(dev, ifr->ifr_flags);
-
-               case SIOCGIFMETRIC:     /* Get the metric on the interface
-                                          (currently unused) */
-                       ifr->ifr_metric = 0;
-                       return 0;
-
-               case SIOCSIFMETRIC:     /* Set the metric on the interface
-                                          (currently unused) */
-                       return -EOPNOTSUPP;
-
-               case SIOCGIFMTU:        /* Get the MTU of a device */
-                       ifr->ifr_mtu = dev->mtu;
-                       return 0;
-
-               case SIOCSIFMTU:        /* Set the MTU of a device */
-                       return dev_set_mtu(dev, ifr->ifr_mtu);
-
-               case SIOCGIFHWADDR:
-                       if (!dev->addr_len)
-                               memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
-                       else
-                               memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
-                                      min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
-                       ifr->ifr_hwaddr.sa_family = dev->type;
-                       return 0;
-
-               case SIOCSIFHWADDR:
-                       return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
-
-               case SIOCSIFHWBROADCAST:
-                       if (ifr->ifr_hwaddr.sa_family != dev->type)
-                               return -EINVAL;
-                       memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
-                              min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
-                       raw_notifier_call_chain(&netdev_chain,
-                                           NETDEV_CHANGEADDR, dev);
-                       return 0;
-
-               case SIOCGIFMAP:
-                       ifr->ifr_map.mem_start = dev->mem_start;
-                       ifr->ifr_map.mem_end   = dev->mem_end;
-                       ifr->ifr_map.base_addr = dev->base_addr;
-                       ifr->ifr_map.irq       = dev->irq;
-                       ifr->ifr_map.dma       = dev->dma;
-                       ifr->ifr_map.port      = dev->if_port;
-                       return 0;
-
-               case SIOCSIFMAP:
-                       if (dev->set_config) {
-                               if (!netif_device_present(dev))
-                                       return -ENODEV;
-                               return dev->set_config(dev, &ifr->ifr_map);
-                       }
-                       return -EOPNOTSUPP;
-
-               case SIOCADDMULTI:
-                       if (!dev->set_multicast_list ||
-                           ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
-                               return -EINVAL;
-                       if (!netif_device_present(dev))
-                               return -ENODEV;
-                       return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
-                                         dev->addr_len, 1);
-
-               case SIOCDELMULTI:
-                       if (!dev->set_multicast_list ||
-                           ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
-                               return -EINVAL;
-                       if (!netif_device_present(dev))
-                               return -ENODEV;
-                       return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
-                                            dev->addr_len, 1);
-
-               case SIOCGIFINDEX:
-                       ifr->ifr_ifindex = dev->ifindex;
-                       return 0;
-
-               case SIOCGIFTXQLEN:
-                       ifr->ifr_qlen = dev->tx_queue_len;
-                       return 0;
-
-               case SIOCSIFTXQLEN:
-                       if (ifr->ifr_qlen < 0)
-                               return -EINVAL;
-                       dev->tx_queue_len = ifr->ifr_qlen;
-                       return 0;
-
-               case SIOCSIFNAME:
-                       ifr->ifr_newname[IFNAMSIZ-1] = '\0';
-                       return dev_change_name(dev, ifr->ifr_newname);
-
-               /*
-                *      Unknown or private ioctl
-                */
-
-               default:
-                       if ((cmd >= SIOCDEVPRIVATE &&
-                           cmd <= SIOCDEVPRIVATE + 15) ||
-                           cmd == SIOCBONDENSLAVE ||
-                           cmd == SIOCBONDRELEASE ||
-                           cmd == SIOCBONDSETHWADDR ||
-                           cmd == SIOCBONDSLAVEINFOQUERY ||
-                           cmd == SIOCBONDINFOQUERY ||
-                           cmd == SIOCBONDCHANGEACTIVE ||
-                           cmd == SIOCGMIIPHY ||
-                           cmd == SIOCGMIIREG ||
-                           cmd == SIOCSMIIREG ||
-                           cmd == SIOCBRADDIF ||
-                           cmd == SIOCBRDELIF ||
-                           cmd == SIOCWANDEV) {
-                               err = -EOPNOTSUPP;
-                               if (dev->do_ioctl) {
-                                       if (netif_device_present(dev))
-                                               err = dev->do_ioctl(dev, ifr,
-                                                                   cmd);
-                                       else
-                                               err = -ENODEV;
-                               }
-                       } else
-                               err = -EINVAL;
-
-       }
-       return err;
-}
-
-/*
- *     This function handles all "interface"-type I/O control requests. The actual
- *     'doing' part of this is dev_ifsioc above.
- */
-
-/**
- *     dev_ioctl       -       network device ioctl
- *     @cmd: command to issue
- *     @arg: pointer to a struct ifreq in user space
- *
- *     Issue ioctl functions to devices. This is normally called by the
- *     user space syscall interfaces but can sometimes be useful for
- *     other purposes. The return value is the return from the syscall if
- *     positive or a negative errno code on error.
- */
-
-int dev_ioctl(unsigned int cmd, void __user *arg)
-{
-       struct ifreq ifr;
-       int ret;
-       char *colon;
-
-       /* One special case: SIOCGIFCONF takes ifconf argument
-          and requires shared lock, because it sleeps writing
-          to user space.
-        */
-
-       if (cmd == SIOCGIFCONF) {
-               rtnl_lock();
-               ret = dev_ifconf((char __user *) arg);
-               rtnl_unlock();
-               return ret;
-       }
-       if (cmd == SIOCGIFNAME)
-               return dev_ifname((struct ifreq __user *)arg);
-
-       if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
-               return -EFAULT;
-
-       ifr.ifr_name[IFNAMSIZ-1] = 0;
-
-       colon = strchr(ifr.ifr_name, ':');
-       if (colon)
-               *colon = 0;
-
-       /*
-        *      See which interface the caller is talking about.
-        */
-
-       switch (cmd) {
-               /*
-                *      These ioctl calls:
-                *      - can be done by all.
-                *      - atomic and do not require locking.
-                *      - return a value
-                */
-               case SIOCGIFFLAGS:
-               case SIOCGIFMETRIC:
-               case SIOCGIFMTU:
-               case SIOCGIFHWADDR:
-               case SIOCGIFSLAVE:
-               case SIOCGIFMAP:
-               case SIOCGIFINDEX:
-               case SIOCGIFTXQLEN:
-                       dev_load(ifr.ifr_name);
-                       read_lock(&dev_base_lock);
-                       ret = dev_ifsioc(&ifr, cmd);
-                       read_unlock(&dev_base_lock);
-                       if (!ret) {
-                               if (colon)
-                                       *colon = ':';
-                               if (copy_to_user(arg, &ifr,
-                                                sizeof(struct ifreq)))
-                                       ret = -EFAULT;
-                       }
-                       return ret;
-
-               case SIOCETHTOOL:
-                       dev_load(ifr.ifr_name);
-                       rtnl_lock();
-                       ret = dev_ethtool(&ifr);
-                       rtnl_unlock();
-                       if (!ret) {
-                               if (colon)
-                                       *colon = ':';
-                               if (copy_to_user(arg, &ifr,
-                                                sizeof(struct ifreq)))
-                                       ret = -EFAULT;
-                       }
-                       return ret;
-
-               /*
-                *      These ioctl calls:
-                *      - require superuser power.
-                *      - require strict serialization.
-                *      - return a value
-                */
-               case SIOCGMIIPHY:
-               case SIOCGMIIREG:
-               case SIOCSIFNAME:
-                       if (!capable(CAP_NET_ADMIN))
-                               return -EPERM;
-                       dev_load(ifr.ifr_name);
-                       rtnl_lock();
-                       ret = dev_ifsioc(&ifr, cmd);
-                       rtnl_unlock();
-                       if (!ret) {
-                               if (colon)
-                                       *colon = ':';
-                               if (copy_to_user(arg, &ifr,
-                                                sizeof(struct ifreq)))
-                                       ret = -EFAULT;
-                       }
-                       return ret;
-
-               /*
-                *      These ioctl calls:
-                *      - require superuser power.
-                *      - require strict serialization.
-                *      - do not return a value
-                */
-               case SIOCSIFFLAGS:
-               case SIOCSIFMETRIC:
-               case SIOCSIFMTU:
-               case SIOCSIFMAP:
-               case SIOCSIFHWADDR:
-               case SIOCSIFSLAVE:
-               case SIOCADDMULTI:
-               case SIOCDELMULTI:
-               case SIOCSIFHWBROADCAST:
-               case SIOCSIFTXQLEN:
-               case SIOCSMIIREG:
-               case SIOCBONDENSLAVE:
-               case SIOCBONDRELEASE:
-               case SIOCBONDSETHWADDR:
-               case SIOCBONDCHANGEACTIVE:
-               case SIOCBRADDIF:
-               case SIOCBRDELIF:
-                       if (!capable(CAP_NET_ADMIN))
-                               return -EPERM;
-                       /* fall through */
-               case SIOCBONDSLAVEINFOQUERY:
-               case SIOCBONDINFOQUERY:
-                       dev_load(ifr.ifr_name);
-                       rtnl_lock();
-                       ret = dev_ifsioc(&ifr, cmd);
-                       rtnl_unlock();
-                       return ret;
-
-               case SIOCGIFMEM:
-                       /* Get the per device memory space. We can add this but
-                        * currently do not support it */
-               case SIOCSIFMEM:
-                       /* Set the per device memory buffer space.
-                        * Not applicable in our case */
-               case SIOCSIFLINK:
-                       return -EINVAL;
-
-               /*
-                *      Unknown or private ioctl.
-                */
-               default:
-                       if (cmd == SIOCWANDEV ||
-                           (cmd >= SIOCDEVPRIVATE &&
-                            cmd <= SIOCDEVPRIVATE + 15)) {
-                               dev_load(ifr.ifr_name);
-                               rtnl_lock();
-                               ret = dev_ifsioc(&ifr, cmd);
-                               rtnl_unlock();
-                               if (!ret && copy_to_user(arg, &ifr,
-                                                        sizeof(struct ifreq)))
-                                       ret = -EFAULT;
-                               return ret;
-                       }
-#ifdef CONFIG_WIRELESS_EXT
-                       /* Take care of Wireless Extensions */
-                       if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
-                               /* If command is `set a parameter', or
-                                * `get the encoding parameters', check if
-                                * the user has the right to do it */
-                               if (IW_IS_SET(cmd) || cmd == SIOCGIWENCODE
-                                   || cmd == SIOCGIWENCODEEXT) {
-                                       if (!capable(CAP_NET_ADMIN))
-                                               return -EPERM;
-                               }
-                               dev_load(ifr.ifr_name);
-                               rtnl_lock();
-                               /* Follow me in net/core/wireless.c */
-                               ret = wireless_process_ioctl(&ifr, cmd);
-                               rtnl_unlock();
-                               if (IW_IS_GET(cmd) &&
-                                   copy_to_user(arg, &ifr,
-                                                sizeof(struct ifreq)))
-                                       ret = -EFAULT;
-                               return ret;
-                       }
-#endif /* CONFIG_WIRELESS_EXT */
-                       return -EINVAL;
-       }
-}
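-
-/* User-space view of the dispatch above (a sketch, not kernel code):
- * an SIOCGIFMTU request reaches dev_ifsioc() roughly via
- * sys_ioctl -> sock_ioctl -> dev_ioctl, after the protocol's own
- * ioctl handler declines the command.
- *
- *     struct ifreq ifr;
- *     int fd = socket(AF_INET, SOCK_DGRAM, 0);
- *
- *     strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
- *     if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
- *             printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);
- */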
-
-
-/**
- *     dev_new_index   -       allocate an ifindex
- *
- *     Returns a suitable unique value for a new device interface
- *     number.  The caller must hold the rtnl semaphore or the
- *     dev_base_lock to be sure it remains unique.
- */
-static int dev_new_index(void)
-{
-       static int ifindex;
-       for (;;) {
-               if (++ifindex <= 0)
-                       ifindex = 1;
-               if (!__dev_get_by_index(ifindex))
-                       return ifindex;
-       }
-}
-
-static int dev_boot_phase = 1;
-
-/* Delayed registration/unregistration */
-static DEFINE_SPINLOCK(net_todo_list_lock);
-static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
-
-static inline void net_set_todo(struct net_device *dev)
-{
-       spin_lock(&net_todo_list_lock);
-       list_add_tail(&dev->todo_list, &net_todo_list);
-       spin_unlock(&net_todo_list_lock);
-}
-
-/**
- *     register_netdevice      - register a network device
- *     @dev: device to register
- *
- *     Take a completed network device structure and add it to the kernel
- *     interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
- *     chain. 0 is returned on success. A negative errno code is returned
- *     on a failure to set up the device, or if the name is a duplicate.
- *
- *     Callers must hold the rtnl semaphore. You may want
- *     register_netdev() instead of this.
- *
- *     BUGS:
- *     The locking appears insufficient to guarantee two parallel registers
- *     will not get the same name.
- */
-
-int register_netdevice(struct net_device *dev)
-{
-       struct hlist_head *head;
-       struct hlist_node *p;
-       int ret;
-
-       BUG_ON(dev_boot_phase);
-       ASSERT_RTNL();
-
-       might_sleep();
-
       /* When net_devices are persistent, this will be fatal. */
-       BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
-
-       spin_lock_init(&dev->queue_lock);
-       spin_lock_init(&dev->_xmit_lock);
-       dev->xmit_lock_owner = -1;
-#ifdef CONFIG_NET_CLS_ACT
-       spin_lock_init(&dev->ingress_lock);
-#endif
-
-       ret = alloc_divert_blk(dev);
-       if (ret)
-               goto out;
-
-       dev->iflink = -1;
-
-       /* Init, if this function is available */
-       if (dev->init) {
-               ret = dev->init(dev);
-               if (ret) {
-                       if (ret > 0)
-                               ret = -EIO;
-                       goto out_err;
-               }
-       }
-       if (!dev_valid_name(dev->name)) {
-               ret = -EINVAL;
-               goto out_err;
-       }
-
-       dev->ifindex = dev_new_index();
-       if (dev->iflink == -1)
-               dev->iflink = dev->ifindex;
-
-       /* Check for existence of name */
-       head = dev_name_hash(dev->name);
-       hlist_for_each(p, head) {
-               struct net_device *d
-                       = hlist_entry(p, struct net_device, name_hlist);
-               if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
-                       ret = -EEXIST;
-                       goto out_err;
-               }
-       }
-
-       /* Fix illegal SG+CSUM combinations. */
-       if ((dev->features & NETIF_F_SG) &&
-           !(dev->features & NETIF_F_ALL_CSUM)) {
-               printk("%s: Dropping NETIF_F_SG since no checksum feature.\n",
-                      dev->name);
-               dev->features &= ~NETIF_F_SG;
-       }
-
-       /* TSO requires that SG is present as well. */
-       if ((dev->features & NETIF_F_TSO) &&
-           !(dev->features & NETIF_F_SG)) {
-               printk("%s: Dropping NETIF_F_TSO since no SG feature.\n",
-                      dev->name);
-               dev->features &= ~NETIF_F_TSO;
-       }
-       if (dev->features & NETIF_F_UFO) {
-               if (!(dev->features & NETIF_F_HW_CSUM)) {
-                       printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
-                                       "NETIF_F_HW_CSUM feature.\n",
-                                                       dev->name);
-                       dev->features &= ~NETIF_F_UFO;
-               }
-               if (!(dev->features & NETIF_F_SG)) {
-                       printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
-                                       "NETIF_F_SG feature.\n",
-                                       dev->name);
-                       dev->features &= ~NETIF_F_UFO;
-               }
-       }
-
       /*
        *      Install a nil rebuild_header routine that should never be
        *      called; it is used purely as a bug trap.
        */
-
-       if (!dev->rebuild_header)
-               dev->rebuild_header = default_rebuild_header;
-
-       ret = netdev_register_sysfs(dev);
-       if (ret)
-               goto out_err;
-       dev->reg_state = NETREG_REGISTERED;
-
-       /*
-        *      Default initial state at registry is that the
-        *      device is present.
-        */
-
-       set_bit(__LINK_STATE_PRESENT, &dev->state);
-
-       dev->next = NULL;
-       dev_init_scheduler(dev);
-       write_lock_bh(&dev_base_lock);
-       *dev_tail = dev;
-       dev_tail = &dev->next;
-       hlist_add_head(&dev->name_hlist, head);
-       hlist_add_head(&dev->index_hlist, dev_index_hash(dev->ifindex));
-       dev_hold(dev);
-       write_unlock_bh(&dev_base_lock);
-
-       /* Notify protocols, that a new device appeared. */
-       raw_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
-
-       ret = 0;
-
-out:
-       return ret;
-out_err:
-       free_divert_blk(dev);
-       goto out;
-}
-
-/**
- *     register_netdev - register a network device
- *     @dev: device to register
- *
- *     Take a completed network device structure and add it to the kernel
- *     interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
- *     chain. 0 is returned on success. A negative errno code is returned
- *     on a failure to set up the device, or if the name is a duplicate.
- *
 *     This is a wrapper around register_netdevice that takes the rtnl semaphore
- *     and expands the device name if you passed a format string to
- *     alloc_netdev.
- */
-int register_netdev(struct net_device *dev)
-{
-       int err;
-
-       rtnl_lock();
-
-       /*
-        * If the name is a format string the caller wants us to do a
-        * name allocation.
-        */
-       if (strchr(dev->name, '%')) {
-               err = dev_alloc_name(dev, dev->name);
-               if (err < 0)
-                       goto out;
-       }
-       
-       /*
-        * Back compatibility hook. Kill this one in 2.5
-        */
-       if (dev->name[0] == 0 || dev->name[0] == ' ') {
-               err = dev_alloc_name(dev, "eth%d");
-               if (err < 0)
-                       goto out;
-       }
-
-       err = register_netdevice(dev);
-out:
-       rtnl_unlock();
-       return err;
-}
-EXPORT_SYMBOL(register_netdev);
-
-/*
- * netdev_wait_allrefs - wait until all references are gone.
- *
- * This is called when unregistering network devices.
- *
- * Any protocol or device that holds a reference should register
- * for netdevice notification, and cleanup and put back the
- * reference if they receive an UNREGISTER event.
- * We can get stuck here if buggy protocols don't correctly
- * call dev_put. 
- */
-static void netdev_wait_allrefs(struct net_device *dev)
-{
-       unsigned long rebroadcast_time, warning_time;
-
-       rebroadcast_time = warning_time = jiffies;
-       while (atomic_read(&dev->refcnt) != 0) {
-               if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
-                       rtnl_lock();
-
-                       /* Rebroadcast unregister notification */
-                       raw_notifier_call_chain(&netdev_chain,
-                                           NETDEV_UNREGISTER, dev);
-
-                       if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
-                                    &dev->state)) {
-                               /* We must not have linkwatch events
-                                * pending on unregister. If this
-                                * happens, we simply run the queue
-                                * unscheduled, resulting in a noop
-                                * for this device.
-                                */
-                               linkwatch_run_queue();
-                       }
-
-                       __rtnl_unlock();
-
-                       rebroadcast_time = jiffies;
-               }
-
-               msleep(250);
-
-               if (time_after(jiffies, warning_time + 10 * HZ)) {
-                       printk(KERN_EMERG "unregister_netdevice: "
-                              "waiting for %s to become free. Usage "
-                              "count = %d\n",
-                              dev->name, atomic_read(&dev->refcnt));
-                       warning_time = jiffies;
-               }
-       }
-}
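-
-/* Sketch of the contract described above (hypothetical names): a
- * subsystem that holds device references registers a notifier with
- * register_netdevice_notifier() and drops its references on
- * %NETDEV_UNREGISTER, so netdev_wait_allrefs() can terminate.
- */
-static int example_netdev_event(struct notifier_block *nb,
-                               unsigned long event, void *ptr)
-{
-       struct net_device *dev = ptr;
-
-       if (event == NETDEV_UNREGISTER)
-               example_drop_refs(dev); /* hypothetical: dev_put()s our refs */
-       return NOTIFY_DONE;
-}
-
-static struct notifier_block example_nb = {
-       .notifier_call = example_netdev_event,
-};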
-
-/* The sequence is:
- *
- *     rtnl_lock();
- *     ...
- *     register_netdevice(x1);
- *     register_netdevice(x2);
- *     ...
- *     unregister_netdevice(y1);
- *     unregister_netdevice(y2);
- *      ...
- *     rtnl_unlock();
- *     free_netdev(y1);
- *     free_netdev(y2);
- *
- * We are invoked by rtnl_unlock() after it drops the semaphore.
- * This allows us to deal with problems:
- * 1) We can delete sysfs objects which invoke hotplug
- *    without deadlocking with linkwatch via keventd.
- * 2) Since we run with the RTNL semaphore not held, we can sleep
- *    safely in order to wait for the netdev refcnt to drop to zero.
- */
-static DEFINE_MUTEX(net_todo_run_mutex);
-void netdev_run_todo(void)
-{
-       struct list_head list = LIST_HEAD_INIT(list);
-
       /* Need to guard against multiple CPUs getting out of order. */
-       mutex_lock(&net_todo_run_mutex);
-
-       /* Not safe to do outside the semaphore.  We must not return
-        * until all unregister events invoked by the local processor
-        * have been completed (either by this todo run, or one on
-        * another cpu).
-        */
-       if (list_empty(&net_todo_list))
-               goto out;
-
-       /* Snapshot list, allow later requests */
-       spin_lock(&net_todo_list_lock);
-       list_splice_init(&net_todo_list, &list);
-       spin_unlock(&net_todo_list_lock);
-               
-       while (!list_empty(&list)) {
-               struct net_device *dev
-                       = list_entry(list.next, struct net_device, todo_list);
-               list_del(&dev->todo_list);
-
-               if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
-                       printk(KERN_ERR "network todo '%s' but state %d\n",
-                              dev->name, dev->reg_state);
-                       dump_stack();
-                       continue;
-               }
-
-               netdev_unregister_sysfs(dev);
-               dev->reg_state = NETREG_UNREGISTERED;
-
-               netdev_wait_allrefs(dev);
-
-               /* paranoia */
-               BUG_ON(atomic_read(&dev->refcnt));
-               BUG_TRAP(!dev->ip_ptr);
-               BUG_TRAP(!dev->ip6_ptr);
-               BUG_TRAP(!dev->dn_ptr);
-
-               /* This must be the very last action;
-                * after it, 'dev' may point to freed memory.
-                */
-               if (dev->destructor)
-                       dev->destructor(dev);
-       }
-
-out:
-       mutex_unlock(&net_todo_run_mutex);
-}
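-
-/* Illustrative sketch (not part of the original file): the sequence the
- * comment above prescribes, as a driver teardown path would issue it.
- * netdev_run_todo() runs from rtnl_unlock(), so free_netdev() must come
- * only after the unlock.  'my_teardown' and the devices are hypothetical.
- */
-static void my_teardown(struct net_device *y1, struct net_device *y2)
-{
-       rtnl_lock();
-       unregister_netdevice(y1);
-       unregister_netdevice(y2);
-       rtnl_unlock();                  /* invokes netdev_run_todo() */
-
-       free_netdev(y1);                /* safe: the todo run waited for refs */
-       free_netdev(y2);
-}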
-
-/**
- *     alloc_netdev - allocate network device
- *     @sizeof_priv:   size of private data to allocate space for
- *     @name:          device name format string
- *     @setup:         callback to initialize device
- *
- *     Allocates a struct net_device with private data area for driver use
- *     and performs basic initialization.
- */
-struct net_device *alloc_netdev(int sizeof_priv, const char *name,
-               void (*setup)(struct net_device *))
-{
-       void *p;
-       struct net_device *dev;
-       int alloc_size;
-
-       /* ensure 32-byte alignment of both the device and private area */
-       alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
-       alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
-
-       p = kzalloc(alloc_size, GFP_KERNEL);
-       if (!p) {
-               printk(KERN_ERR "alloc_dev: Unable to allocate device.\n");
-               return NULL;
-       }
-
-       dev = (struct net_device *)
-               (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
-       dev->padded = (char *)dev - (char *)p;
-
-       if (sizeof_priv)
-               dev->priv = netdev_priv(dev);
-
-       setup(dev);
-       strcpy(dev->name, name);
-       return dev;
-}
-EXPORT_SYMBOL(alloc_netdev);
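-
-/* Illustrative sketch (not part of the original file): typical driver
- * usage of alloc_netdev().  'struct my_priv' and 'my_create' are
- * hypothetical; ether_setup() is the stock Ethernet initializer, and
- * register_netdev() resolves the "%d" in the name.
- */
-struct my_priv {
-       int my_counter;
-};
-
-static int my_create(void)
-{
-       struct net_device *dev;
-       int err;
-
-       dev = alloc_netdev(sizeof(struct my_priv), "my%d", ether_setup);
-       if (!dev)
-               return -ENOMEM;
-
-       /* the private area follows the 32-byte-aligned net_device */
-       ((struct my_priv *)netdev_priv(dev))->my_counter = 0;
-
-       err = register_netdev(dev);
-       if (err)
-               free_netdev(dev);
-       return err;
-}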
-
-/**
- *     free_netdev - free network device
- *     @dev: device
- *
- *     This function does the last stage of destroying an allocated device 
- *     interface. The reference to the device object is released.  
- *     If this is the last reference then it will be freed.
- */
-void free_netdev(struct net_device *dev)
-{
-#ifdef CONFIG_SYSFS
-       /*  Compatibility with error handling in drivers */
-       if (dev->reg_state == NETREG_UNINITIALIZED) {
-               kfree((char *)dev - dev->padded);
-               return;
-       }
-
-       BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
-       dev->reg_state = NETREG_RELEASED;
-
-       /* will free via class release */
-       class_device_put(&dev->class_dev);
-#else
-       kfree((char *)dev - dev->padded);
-#endif
-}
-
-/* Synchronize with packet receive processing. */
-void synchronize_net(void) 
-{
-       might_sleep();
-       synchronize_rcu();
-}
-
-/**
- *     unregister_netdevice - remove device from the kernel
- *     @dev: device
- *
- *     This function shuts down a device interface and removes it
- *     from the kernel tables. On success 0 is returned, on a failure
- *     a negative errno code is returned.
- *
- *     Callers must hold the rtnl semaphore.  You may want
- *     unregister_netdev() instead of this.
- */
-
-int unregister_netdevice(struct net_device *dev)
-{
-       struct net_device *d, **dp;
-
-       BUG_ON(dev_boot_phase);
-       ASSERT_RTNL();
-
-       /* Some devices call this without having registered, when unwinding
-        * a failed initialization. */
-       if (dev->reg_state == NETREG_UNINITIALIZED) {
-               printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
-                                 "was registered\n", dev->name, dev);
-               return -ENODEV;
-       }
-
-       BUG_ON(dev->reg_state != NETREG_REGISTERED);
-
-       /* If device is running, close it first. */
-       if (dev->flags & IFF_UP)
-               dev_close(dev);
-
-       /* And unlink it from the device chain. */
-       for (dp = &dev_base; (d = *dp) != NULL; dp = &d->next) {
-               if (d == dev) {
-                       write_lock_bh(&dev_base_lock);
-                       hlist_del(&dev->name_hlist);
-                       hlist_del(&dev->index_hlist);
-                       if (dev_tail == &dev->next)
-                               dev_tail = dp;
-                       *dp = d->next;
-                       write_unlock_bh(&dev_base_lock);
-                       break;
-               }
-       }
-       if (!d) {
-               printk(KERN_ERR "unregister net_device: '%s' not found\n",
-                      dev->name);
-               return -ENODEV;
-       }
-
-       dev->reg_state = NETREG_UNREGISTERING;
-
-       synchronize_net();
-
-       /* Shutdown queueing discipline. */
-       dev_shutdown(dev);
-
-       /* Notify protocols that we are about to destroy
-          this device. They should clean up all their state.
-       */
-       raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);
-       
-       /*
-        *      Flush the multicast chain
-        */
-       dev_mc_discard(dev);
-
-       if (dev->uninit)
-               dev->uninit(dev);
-
-       /* Notifier chain MUST detach us from master device. */
-       BUG_TRAP(!dev->master);
-
-       free_divert_blk(dev);
-
-       /* Finish processing unregister after unlock */
-       net_set_todo(dev);
-
-       synchronize_net();
-
-       dev_put(dev);
-       return 0;
-}
-
-/**
- *     unregister_netdev - remove device from the kernel
- *     @dev: device
- *
- *     This function shuts down a device interface and removes it
- *     from the kernel tables. On success 0 is returned, on a failure
- *     a negative errno code is returned.
- *
- *     This is just a wrapper for unregister_netdevice that takes
- *     the rtnl semaphore.  In general you want to use this and not
- *     unregister_netdevice.
- */
-void unregister_netdev(struct net_device *dev)
-{
-       rtnl_lock();
-       unregister_netdevice(dev);
-       rtnl_unlock();
-}
-
-EXPORT_SYMBOL(unregister_netdev);
-
-#ifdef CONFIG_HOTPLUG_CPU
-static int dev_cpu_callback(struct notifier_block *nfb,
-                           unsigned long action,
-                           void *ocpu)
-{
-       struct sk_buff **list_skb;
-       struct net_device **list_net;
-       struct sk_buff *skb;
-       unsigned int cpu, oldcpu = (unsigned long)ocpu;
-       struct softnet_data *sd, *oldsd;
-
-       if (action != CPU_DEAD)
-               return NOTIFY_OK;
-
-       local_irq_disable();
-       cpu = smp_processor_id();
-       sd = &per_cpu(softnet_data, cpu);
-       oldsd = &per_cpu(softnet_data, oldcpu);
-
-       /* Find end of our completion_queue. */
-       list_skb = &sd->completion_queue;
-       while (*list_skb)
-               list_skb = &(*list_skb)->next;
-       /* Append completion queue from offline CPU. */
-       *list_skb = oldsd->completion_queue;
-       oldsd->completion_queue = NULL;
-
-       /* Find end of our output_queue. */
-       list_net = &sd->output_queue;
-       while (*list_net)
-               list_net = &(*list_net)->next_sched;
-       /* Append output queue from offline CPU. */
-       *list_net = oldsd->output_queue;
-       oldsd->output_queue = NULL;
-
-       raise_softirq_irqoff(NET_TX_SOFTIRQ);
-       local_irq_enable();
-
-       /* Process offline CPU's input_pkt_queue */
-       while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
-               netif_rx(skb);
-
-       return NOTIFY_OK;
-}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-
-/*
- *     Initialize the DEV module. At boot time this walks the device list and
- *     unhooks any devices that fail to initialise (normally hardware not
- *     present) and leaves us with a valid list of present and active devices.
- *
- */
-
-/*
- *     This is called single-threaded during boot, so no need
- *     to take the rtnl semaphore.
- */
-static int __init net_dev_init(void)
-{
-       int i, rc = -ENOMEM;
-
-       BUG_ON(!dev_boot_phase);
-
-       net_random_init();
-
-       if (dev_proc_init())
-               goto out;
-
-       if (netdev_sysfs_init())
-               goto out;
-
-       INIT_LIST_HEAD(&ptype_all);
-       for (i = 0; i < 16; i++) 
-               INIT_LIST_HEAD(&ptype_base[i]);
-
-       for (i = 0; i < ARRAY_SIZE(dev_name_head); i++)
-               INIT_HLIST_HEAD(&dev_name_head[i]);
-
-       for (i = 0; i < ARRAY_SIZE(dev_index_head); i++)
-               INIT_HLIST_HEAD(&dev_index_head[i]);
-
-       /*
-        *      Initialise the packet receive queues.
-        */
-
-       for_each_possible_cpu(i) {
-               struct softnet_data *queue;
-
-               queue = &per_cpu(softnet_data, i);
-               skb_queue_head_init(&queue->input_pkt_queue);
-               queue->completion_queue = NULL;
-               INIT_LIST_HEAD(&queue->poll_list);
-               set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
-               queue->backlog_dev.weight = weight_p;
-               queue->backlog_dev.poll = process_backlog;
-               atomic_set(&queue->backlog_dev.refcnt, 1);
-       }
-
-       dev_boot_phase = 0;
-
-       open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
-       open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);
-
-       hotcpu_notifier(dev_cpu_callback, 0);
-       dst_init();
-       dev_mcast_init();
-       rc = 0;
-out:
-       return rc;
-}
-
-subsys_initcall(net_dev_init);
-
-EXPORT_SYMBOL(__dev_get_by_index);
-EXPORT_SYMBOL(__dev_get_by_name);
-EXPORT_SYMBOL(__dev_remove_pack);
-EXPORT_SYMBOL(dev_valid_name);
-EXPORT_SYMBOL(dev_add_pack);
-EXPORT_SYMBOL(dev_alloc_name);
-EXPORT_SYMBOL(dev_close);
-EXPORT_SYMBOL(dev_get_by_flags);
-EXPORT_SYMBOL(dev_get_by_index);
-EXPORT_SYMBOL(dev_get_by_name);
-EXPORT_SYMBOL(dev_open);
-EXPORT_SYMBOL(dev_queue_xmit);
-EXPORT_SYMBOL(dev_remove_pack);
-EXPORT_SYMBOL(dev_set_allmulti);
-EXPORT_SYMBOL(dev_set_promiscuity);
-EXPORT_SYMBOL(dev_change_flags);
-EXPORT_SYMBOL(dev_set_mtu);
-EXPORT_SYMBOL(dev_set_mac_address);
-EXPORT_SYMBOL(free_netdev);
-EXPORT_SYMBOL(netdev_boot_setup_check);
-EXPORT_SYMBOL(netdev_set_master);
-EXPORT_SYMBOL(netdev_state_change);
-EXPORT_SYMBOL(netif_receive_skb);
-EXPORT_SYMBOL(netif_rx);
-EXPORT_SYMBOL(register_gifconf);
-EXPORT_SYMBOL(register_netdevice);
-EXPORT_SYMBOL(register_netdevice_notifier);
-EXPORT_SYMBOL(skb_checksum_help);
-EXPORT_SYMBOL(synchronize_net);
-EXPORT_SYMBOL(unregister_netdevice);
-EXPORT_SYMBOL(unregister_netdevice_notifier);
-EXPORT_SYMBOL(net_enable_timestamp);
-EXPORT_SYMBOL(net_disable_timestamp);
-EXPORT_SYMBOL(dev_get_flags);
-EXPORT_SYMBOL(skb_checksum_setup);
-
-#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
-EXPORT_SYMBOL(br_handle_frame_hook);
-EXPORT_SYMBOL(br_fdb_get_hook);
-EXPORT_SYMBOL(br_fdb_put_hook);
-#endif
-
-#ifdef CONFIG_KMOD
-EXPORT_SYMBOL(dev_load);
-#endif
-
-EXPORT_PER_CPU_SYMBOL(softnet_data);
diff --git a/net/ipv4/tcp.c~ b/net/ipv4/tcp.c~
deleted file mode 100644 (file)
index f3a1f9b..0000000
+++ /dev/null
@@ -1,2261 +0,0 @@
-/*
- * INET                An implementation of the TCP/IP protocol suite for the LINUX
- *             operating system.  INET is implemented using the  BSD Socket
- *             interface as the means of communication with the user level.
- *
- *             Implementation of the Transmission Control Protocol(TCP).
- *
- * Version:    $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
- *
- * Authors:    Ross Biro
- *             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *             Mark Evans, <evansmp@uhura.aston.ac.uk>
- *             Corey Minyard <wf-rch!minyard@relay.EU.net>
- *             Florian La Roche, <flla@stud.uni-sb.de>
- *             Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
- *             Linus Torvalds, <torvalds@cs.helsinki.fi>
- *             Alan Cox, <gw4pts@gw4pts.ampr.org>
- *             Matthew Dillon, <dillon@apollo.west.oic.com>
- *             Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- *             Jorge Cwik, <jorge@laser.satlink.net>
- *
- * Fixes:
- *             Alan Cox        :       Numerous verify_area() calls
- *             Alan Cox        :       Set the ACK bit on a reset
- *             Alan Cox        :       Stopped it crashing if it closed while
- *                                     sk->inuse=1 and was trying to connect
- *                                     (tcp_err()).
- *             Alan Cox        :       All icmp error handling was broken
- *                                     pointers passed where wrong and the
- *                                     pointers passed were wrong and the
- *                                     tested any icmp error code obviously.
- *             Alan Cox        :       tcp_err() now handled properly. It
- *                                     wakes people on errors. poll
- *                                     behaves and the icmp error race
- *                                     has gone by moving it into sock.c
- *             Alan Cox        :       tcp_send_reset() fixed to work for
- *                                     everything not just packets for
- *                                     unknown sockets.
- *             Alan Cox        :       tcp option processing.
- *             Alan Cox        :       Reset tweaked (still not 100%) [Had
- *                                     syn rule wrong]
- *             Herp Rosmanith  :       More reset fixes
- *             Alan Cox        :       No longer acks invalid rst frames.
- *                                     Acking any kind of RST is right out.
- *             Alan Cox        :       Sets an ignore me flag on an rst
- *                                     receive otherwise odd bits of prattle
- *                                     escape still
- *             Alan Cox        :       Fixed another acking RST frame bug.
- *                                     Should stop LAN workplace lockups.
- *             Alan Cox        :       Some tidyups using the new skb list
- *                                     facilities
- *             Alan Cox        :       sk->keepopen now seems to work
- *             Alan Cox        :       Pulls options out correctly on accepts
- *             Alan Cox        :       Fixed assorted sk->rqueue->next errors
- *             Alan Cox        :       PSH doesn't end a TCP read. Switched a
- *                                     bit to skb ops.
- *             Alan Cox        :       Tidied tcp_data to avoid a potential
- *                                     nasty.
- *             Alan Cox        :       Added some better commenting, as the
- *                                     tcp is hard to follow
- *             Alan Cox        :       Removed incorrect check for 20 * psh
- *     Michael O'Reilly        :       ack < copied bug fix.
- *     Johannes Stille         :       Misc tcp fixes (not all in yet).
- *             Alan Cox        :       FIN with no memory -> CRASH
- *             Alan Cox        :       Added socket option proto entries.
- *                                     Also added awareness of them to accept.
- *             Alan Cox        :       Added TCP options (SOL_TCP)
- *             Alan Cox        :       Switched wakeup calls to callbacks,
- *                                     so the kernel can layer network
- *                                     sockets.
- *             Alan Cox        :       Use ip_tos/ip_ttl settings.
- *             Alan Cox        :       Handle FIN (more) properly (we hope).
- *             Alan Cox        :       RST frames sent on unsynchronised
- *                                     state ack error.
- *             Alan Cox        :       Put in missing check for SYN bit.
- *             Alan Cox        :       Added tcp_select_window() aka NET2E
- *                                     window non shrink trick.
- *             Alan Cox        :       Added a couple of small NET2E timer
- *                                     fixes
- *             Charles Hedrick :       TCP fixes
- *             Toomas Tamm     :       TCP window fixes
- *             Alan Cox        :       Small URG fix to rlogin ^C ack fight
- *             Charles Hedrick :       Rewrote most of it to actually work
- *             Linus           :       Rewrote tcp_read() and URG handling
- *                                     completely
- *             Gerhard Koerting:       Fixed some missing timer handling
- *             Matthew Dillon  :       Reworked TCP machine states as per RFC
- *             Gerhard Koerting:       PC/TCP workarounds
- *             Adam Caldwell   :       Assorted timer/timing errors
- *             Matthew Dillon  :       Fixed another RST bug
- *             Alan Cox        :       Move to kernel side addressing changes.
- *             Alan Cox        :       Beginning work on TCP fastpathing
- *                                     (not yet usable)
- *             Arnt Gulbrandsen:       Turbocharged tcp_check() routine.
- *             Alan Cox        :       TCP fast path debugging
- *             Alan Cox        :       Window clamping
- *             Michael Riepe   :       Bug in tcp_check()
- *             Matt Dillon     :       More TCP improvements and RST bug fixes
- *             Matt Dillon     :       Yet more small nasties removed from the
- *                                     TCP code (Be very nice to this man if
- *                                     tcp finally works 100%) 8)
- *             Alan Cox        :       BSD accept semantics.
- *             Alan Cox        :       Reset on closedown bug.
- *     Peter De Schrijver      :       ENOTCONN check missing in tcp_sendto().
- *             Michael Pall    :       Handle poll() after URG properly in
- *                                     all cases.
- *             Michael Pall    :       Undo the last fix in tcp_read_urg()
- *                                     (multi URG PUSH broke rlogin).
- *             Michael Pall    :       Fix the multi URG PUSH problem in
- *                                     tcp_readable(), poll() after URG
- *                                     works now.
- *             Michael Pall    :       recv(...,MSG_OOB) never blocks in the
- *                                     BSD api.
- *             Alan Cox        :       Changed the semantics of sk->socket to
- *                                     fix a race and a signal problem with
- *                                     accept() and async I/O.
- *             Alan Cox        :       Relaxed the rules on tcp_sendto().
- *             Yury Shevchuk   :       Really fixed accept() blocking problem.
- *             Craig I. Hagan  :       Allow for BSD compatible TIME_WAIT for
- *                                     clients/servers which listen in on
- *                                     fixed ports.
- *             Alan Cox        :       Cleaned the above up and shrank it to
- *                                     a sensible code size.
- *             Alan Cox        :       Self connect lockup fix.
- *             Alan Cox        :       No connect to multicast.
- *             Ross Biro       :       Close unaccepted children on master
- *                                     socket close.
- *             Alan Cox        :       Reset tracing code.
- *             Alan Cox        :       Spurious resets on shutdown.
- *             Alan Cox        :       Giant 15 minute/60 second timer error
- *             Alan Cox        :       Small whoops in polling before an
- *                                     accept.
- *             Alan Cox        :       Kept the state trace facility since
- *                                     it's handy for debugging.
- *             Alan Cox        :       More reset handler fixes.
- *             Alan Cox        :       Started rewriting the code based on
- *                                     the RFC's for other useful protocol
- *                                     references see: Comer, KA9Q NOS, and
- *                                     for a reference on the difference
- *                                     between specifications and how BSD
- *                                     works see the 4.4lite source.
- *             A.N.Kuznetsov   :       Don't time wait on completion of tidy
- *                                     close.
- *             Linus Torvalds  :       Fin/Shutdown & copied_seq changes.
- *             Linus Torvalds  :       Fixed BSD port reuse to work first syn
- *             Alan Cox        :       Reimplemented timers as per the RFC
- *                                     and using multiple timers for sanity.
- *             Alan Cox        :       Small bug fixes, and a lot of new
- *                                     comments.
- *             Alan Cox        :       Fixed dual reader crash by locking
- *                                     the buffers (much like datagram.c)
- *             Alan Cox        :       Fixed stuck sockets in probe. A probe
- *                                     now gets fed up of retrying without
- *                                     (even a no space) answer.
- *             Alan Cox        :       Extracted closing code better
- *             Alan Cox        :       Fixed the closing state machine to
- *                                     resemble the RFC.
- *             Alan Cox        :       More 'per spec' fixes.
- *             Jorge Cwik      :       Even faster checksumming.
- *             Alan Cox        :       tcp_data() doesn't ack illegal PSH
- *                                     only frames. At least one pc tcp stack
- *                                     generates them.
- *             Alan Cox        :       Cache last socket.
- *             Alan Cox        :       Per route irtt.
- *             Matt Day        :       poll()->select() match BSD precisely on error
- *             Alan Cox        :       New buffers
- *             Marc Tamsky     :       Various sk->prot->retransmits and
- *                                     sk->retransmits misupdating fixed.
- *                                     Fixed tcp_write_timeout: stuck close,
- *                                     and TCP syn retries gets used now.
- *             Mark Yarvis     :       In tcp_read_wakeup(), don't send an
- *                                     ack if state is TCP_CLOSED.
- *             Alan Cox        :       Look up device on a retransmit - routes may
- *                                     change. Doesn't yet cope with MSS shrink right
- *                                     but it's a start!
- *             Marc Tamsky     :       Closing in closing fixes.
- *             Mike Shaver     :       RFC1122 verifications.
- *             Alan Cox        :       rcv_saddr errors.
- *             Alan Cox        :       Block double connect().
- *             Alan Cox        :       Small hooks for enSKIP.
- *             Alexey Kuznetsov:       Path MTU discovery.
- *             Alan Cox        :       Support soft errors.
- *             Alan Cox        :       Fix MTU discovery pathological case
- *                                     when the remote claims no mtu!
- *             Marc Tamsky     :       TCP_CLOSE fix.
- *             Colin (G3TNE)   :       Send a reset on syn ack replies in
- *                                     window but wrong (fixes NT lpd problems)
- *             Pedro Roque     :       Better TCP window handling, delayed ack.
- *             Joerg Reuter    :       No modification of locked buffers in
- *                                     tcp_do_retransmit()
- *             Eric Schenk     :       Changed receiver side silly window
- *                                     avoidance algorithm to BSD style
- *                                     algorithm. This doubles throughput
- *                                     against machines running Solaris,
- *                                     and seems to result in general
- *                                     improvement.
- *     Stefan Magdalinski      :       adjusted tcp_readable() to fix FIONREAD
- *     Willy Konynenberg       :       Transparent proxying support.
- *     Mike McLagan            :       Routing by source
- *             Keith Owens     :       Do proper merging with partial SKB's in
- *                                     tcp_do_sendmsg to avoid burstiness.
- *             Eric Schenk     :       Fix fast close down bug with
- *                                     shutdown() followed by close().
- *             Andi Kleen      :       Make poll agree with SIGIO
- *     Salvatore Sanfilippo    :       Support SO_LINGER with linger == 1 and
- *                                     lingertime == 0 (RFC 793 ABORT Call)
- *     Hirokazu Takahashi      :       Use copy_from_user() instead of
- *                                     csum_and_copy_from_user() if possible.
- *
- *             This program is free software; you can redistribute it and/or
- *             modify it under the terms of the GNU General Public License
- *             as published by the Free Software Foundation; either version
- *             2 of the License, or (at your option) any later version.
- *
- * Description of States:
- *
- *     TCP_SYN_SENT            sent a connection request, waiting for ack
- *
- *     TCP_SYN_RECV            received a connection request, sent ack,
- *                             waiting for final ack in three-way handshake.
- *
- *     TCP_ESTABLISHED         connection established
- *
- *     TCP_FIN_WAIT1           our side has shutdown, waiting to complete
- *                             transmission of remaining buffered data
- *
- *     TCP_FIN_WAIT2           all buffered data sent, waiting for remote
- *                             to shutdown
- *
- *     TCP_CLOSING             both sides have shutdown but we still have
- *                             data we have to finish sending
- *
- *     TCP_TIME_WAIT           timeout to catch resent junk before entering
- *                             closed, can only be entered from FIN_WAIT2
- *                             or CLOSING.  Required because the other end
- *                             may not have gotten our last ACK causing it
- *                             to retransmit the data packet (which we ignore)
- *
- *     TCP_CLOSE_WAIT          remote side has shutdown and is waiting for
- *                             us to finish writing our data and to shutdown
- *                             (we have to close() to move on to LAST_ACK)
- *
- *     TCP_LAST_ACK            our side has shutdown after remote has
- *                             shutdown.  There may still be data in our
- *                             buffer that we have to finish sending
- *
- *     TCP_CLOSE               socket is finished
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/poll.h>
-#include <linux/init.h>
-#include <linux/smp_lock.h>
-#include <linux/fs.h>
-#include <linux/random.h>
-#include <linux/bootmem.h>
-#include <linux/cache.h>
-#include <linux/err.h>
-
-#include <net/icmp.h>
-#include <net/tcp.h>
-#include <net/xfrm.h>
-#include <net/ip.h>
-
-
-#include <asm/uaccess.h>
-#include <asm/ioctls.h>
-
-int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
-
-DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
-
-atomic_t tcp_orphan_count = ATOMIC_INIT(0);
-
-EXPORT_SYMBOL_GPL(tcp_orphan_count);
-
-int sysctl_tcp_mem[3] __read_mostly;
-int sysctl_tcp_wmem[3] __read_mostly;
-int sysctl_tcp_rmem[3] __read_mostly;
-
-EXPORT_SYMBOL(sysctl_tcp_mem);
-EXPORT_SYMBOL(sysctl_tcp_rmem);
-EXPORT_SYMBOL(sysctl_tcp_wmem);
-
-atomic_t tcp_memory_allocated; /* Current allocated memory. */
-atomic_t tcp_sockets_allocated;        /* Current number of TCP sockets. */
-
-EXPORT_SYMBOL(tcp_memory_allocated);
-EXPORT_SYMBOL(tcp_sockets_allocated);
-
-/*
- * Pressure flag: try to collapse.
- * Technical note: it is used by multiple contexts non-atomically.
- * All of sk_stream_mem_schedule() is of this nature: accounting
- * is strict, actions are advisory and have some latency.
- */
-int tcp_memory_pressure;
-
-EXPORT_SYMBOL(tcp_memory_pressure);
-
-void tcp_enter_memory_pressure(void)
-{
-       if (!tcp_memory_pressure) {
-               NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
-               tcp_memory_pressure = 1;
-       }
-}
-
-EXPORT_SYMBOL(tcp_enter_memory_pressure);
-
-/*
- *     Wait for a TCP event.
- *
- *     Note that we don't need to lock the socket, as the upper poll layers
- *     take care of normal races (between the test and the event) and we don't
- *     go look at any of the socket buffers directly.
- */
-unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
-{
-       unsigned int mask;
-       struct sock *sk = sock->sk;
-       struct tcp_sock *tp = tcp_sk(sk);
-
-       poll_wait(file, sk->sk_sleep, wait);
-       if (sk->sk_state == TCP_LISTEN)
-               return inet_csk_listen_poll(sk);
-
-       /* Socket is not locked. We are protected from async events
-          by the poll logic, and correct handling of state changes
-          made by other threads is impossible in any case.
-        */
-
-       mask = 0;
-       if (sk->sk_err)
-               mask = POLLERR;
-
-       /*
-        * POLLHUP is certainly not done right. But poll() doesn't
-        * have a notion of HUP in just one direction, and for a
-        * socket the read side is more interesting.
-        *
-        * Some poll() documentation says that POLLHUP is incompatible
-        * with the POLLOUT/POLLWR flags, so somebody should check this
-        * all. But careful, it tends to be safer to return too many
-        * bits than too few, and you can easily break real applications
-        * if you don't tell them that something has hung up!
-        *
-        * Check-me.
-        *
-        * Check number 1. POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
-        * our fs/select.c). It means that after we received EOF,
-        * poll always returns immediately, making poll() on write()
-        * impossible in state CLOSE_WAIT. One solution is evident ---
-        * to set POLLHUP if and only if shutdown has been made in both
-        * directions. Actually, it is interesting to look at how Solaris
-        * and DUX solve this dilemma. If POLLHUP were maskable, I would
-        * prefer to set it on SND_SHUTDOWN. BTW the examples given in
-        * Stevens' books assume exactly this behaviour, which explains
-        * why POLLHUP is incompatible with POLLOUT.    --ANK
-        *
-        * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
-        * blocking on fresh not-connected or disconnected socket. --ANK
-        */
-       if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
-               mask |= POLLHUP;
-       if (sk->sk_shutdown & RCV_SHUTDOWN)
-               mask |= POLLIN | POLLRDNORM | POLLRDHUP;
-
-       /* Connected? */
-       if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
-               /* Potential race condition. If the read of tp below is
-                * reordered above the read of sk->sk_state, we can be
-                * illegally awakened in SYN_* states. */
-               if ((tp->rcv_nxt != tp->copied_seq) &&
-                   (tp->urg_seq != tp->copied_seq ||
-                    tp->rcv_nxt != tp->copied_seq + 1 ||
-                    sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
-                       mask |= POLLIN | POLLRDNORM;
-
-               if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
-                       if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
-                               mask |= POLLOUT | POLLWRNORM;
-                       } else {  /* send SIGIO later */
-                               set_bit(SOCK_ASYNC_NOSPACE,
-                                       &sk->sk_socket->flags);
-                               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-
-                               /* Race breaker. If space is freed after
-                                * wspace test but before the flags are set,
-                                * IO signal will be lost.
-                                */
-                               if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
-                                       mask |= POLLOUT | POLLWRNORM;
-                       }
-               }
-
-               if (tp->urg_data & TCP_URG_VALID)
-                       mask |= POLLPRI;
-       }
-       return mask;
-}
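-
-/* Illustrative sketch (not part of the original file): the userspace
- * view of the mask computed above.  Per the POLLHUP discussion, EOF in
- * one direction raises POLLIN (and POLLRDHUP), while POLLHUP needs
- * shutdown in both directions or TCP_CLOSE.  'my_wait_readable' is a
- * hypothetical helper.
- */
-#if 0  /* userspace, not kernel code */
-#include <poll.h>
-
-static int my_wait_readable(int fd)
-{
-       struct pollfd pfd = { .fd = fd, .events = POLLIN };
-
-       if (poll(&pfd, 1, -1) < 0)
-               return -1;
-       return pfd.revents;     /* may carry POLLHUP after full shutdown */
-}
-#endif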
-
-int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       int answ;
-
-       switch (cmd) {
-       case SIOCINQ:
-               if (sk->sk_state == TCP_LISTEN)
-                       return -EINVAL;
-
-               lock_sock(sk);
-               if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
-                       answ = 0;
-               else if (sock_flag(sk, SOCK_URGINLINE) ||
-                        !tp->urg_data ||
-                        before(tp->urg_seq, tp->copied_seq) ||
-                        !before(tp->urg_seq, tp->rcv_nxt)) {
-                       answ = tp->rcv_nxt - tp->copied_seq;
-
-                       /* Subtract 1, if FIN is in queue. */
-                       if (answ && !skb_queue_empty(&sk->sk_receive_queue))
-                               answ -= ((struct sk_buff *)
-                                        sk->sk_receive_queue.prev)->h.th->fin;
-               } else
-                       answ = tp->urg_seq - tp->copied_seq;
-               release_sock(sk);
-               break;
-       case SIOCATMARK:
-               answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
-               break;
-       case SIOCOUTQ:
-               if (sk->sk_state == TCP_LISTEN)
-                       return -EINVAL;
-
-               if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
-                       answ = 0;
-               else
-                       answ = tp->write_seq - tp->snd_una;
-               break;
-       default:
-               return -ENOIOCTLCMD;
-       }
-
-       return put_user(answ, (int __user *)arg);
-}
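-
-/* Illustrative sketch (not part of the original file): the userspace
- * side of the ioctls handled above, on an assumed connected TCP socket.
- * 'my_queue_sizes' is a hypothetical helper.
- */
-#if 0  /* userspace, not kernel code */
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <linux/sockios.h>     /* SIOCINQ, SIOCOUTQ */
-
-static void my_queue_sizes(int fd)
-{
-       int inq = 0, outq = 0;
-
-       ioctl(fd, SIOCINQ, &inq);       /* unread bytes in the receive queue */
-       ioctl(fd, SIOCOUTQ, &outq);     /* unsent/unacked bytes in the send queue */
-       printf("inq=%d outq=%d\n", inq, outq);
-}
-#endif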
-
-static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
-{
-       TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
-       tp->pushed_seq = tp->write_seq;
-}
-
-static inline int forced_push(struct tcp_sock *tp)
-{
-       return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
-}
-
-static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
-                             struct sk_buff *skb)
-{
-       skb->csum = 0;
-       TCP_SKB_CB(skb)->seq = tp->write_seq;
-       TCP_SKB_CB(skb)->end_seq = tp->write_seq;
-       TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
-       TCP_SKB_CB(skb)->sacked = 0;
-       skb_header_release(skb);
-       __skb_queue_tail(&sk->sk_write_queue, skb);
-       sk_charge_skb(sk, skb);
-       if (!sk->sk_send_head)
-               sk->sk_send_head = skb;
-       if (tp->nonagle & TCP_NAGLE_PUSH)
-               tp->nonagle &= ~TCP_NAGLE_PUSH; 
-}
-
-static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
-                               struct sk_buff *skb)
-{
-       if (flags & MSG_OOB) {
-               tp->urg_mode = 1;
-               tp->snd_up = tp->write_seq;
-               TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
-       }
-}
-
-static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
-                           int mss_now, int nonagle)
-{
-       if (sk->sk_send_head) {
-               struct sk_buff *skb = sk->sk_write_queue.prev;
-               if (!(flags & MSG_MORE) || forced_push(tp))
-                       tcp_mark_push(tp, skb);
-               tcp_mark_urg(tp, flags, skb);
-               __tcp_push_pending_frames(sk, tp, mss_now,
-                                         (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
-       }
-}
-
-static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
-                        size_t psize, int flags)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       int mss_now, size_goal;
-       int err;
-       ssize_t copied;
-       long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-
-       /* Wait for a connection to finish. */
-       if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
-               if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
-                       goto out_err;
-
-       clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-
-       mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
-       size_goal = tp->xmit_size_goal;
-       copied = 0;
-
-       err = -EPIPE;
-       if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
-               goto do_error;
-
-       while (psize > 0) {
-               struct sk_buff *skb = sk->sk_write_queue.prev;
-               struct page *page = pages[poffset / PAGE_SIZE];
-               int copy, i, can_coalesce;
-               int offset = poffset % PAGE_SIZE;
-               int size = min_t(size_t, psize, PAGE_SIZE - offset);
-
-               if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
-new_segment:
-                       if (!sk_stream_memory_free(sk))
-                               goto wait_for_sndbuf;
-
-                       skb = sk_stream_alloc_pskb(sk, 0, 0,
-                                                  sk->sk_allocation);
-                       if (!skb)
-                               goto wait_for_memory;
-
-                       skb_entail(sk, tp, skb);
-                       copy = size_goal;
-               }
-
-               if (copy > size)
-                       copy = size;
-
-               i = skb_shinfo(skb)->nr_frags;
-               can_coalesce = skb_can_coalesce(skb, i, page, offset);
-               if (!can_coalesce && i >= MAX_SKB_FRAGS) {
-                       tcp_mark_push(tp, skb);
-                       goto new_segment;
-               }
-               if (!sk_stream_wmem_schedule(sk, copy))
-                       goto wait_for_memory;
-
-               if (can_coalesce) {
-                       skb_shinfo(skb)->frags[i - 1].size += copy;
-               } else {
-                       get_page(page);
-                       skb_fill_page_desc(skb, i, page, offset, copy);
-               }
-
-               skb->len += copy;
-               skb->data_len += copy;
-               skb->truesize += copy;
-               sk->sk_wmem_queued += copy;
-               sk->sk_forward_alloc -= copy;
-               skb->ip_summed = CHECKSUM_HW;
-               tp->write_seq += copy;
-               TCP_SKB_CB(skb)->end_seq += copy;
-               skb_shinfo(skb)->gso_segs = 0;
-
-               if (!copied)
-                       TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
-
-               copied += copy;
-               poffset += copy;
-               if (!(psize -= copy))
-                       goto out;
-
-               if (skb->len < mss_now || (flags & MSG_OOB))
-                       continue;
-
-               if (forced_push(tp)) {
-                       tcp_mark_push(tp, skb);
-                       __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
-               } else if (skb == sk->sk_send_head)
-                       tcp_push_one(sk, mss_now);
-               continue;
-
-wait_for_sndbuf:
-               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-wait_for_memory:
-               if (copied)
-                       tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
-
-               if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
-                       goto do_error;
-
-               mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
-               size_goal = tp->xmit_size_goal;
-       }
-
-out:
-       if (copied)
-               tcp_push(sk, tp, flags, mss_now, tp->nonagle);
-       return copied;
-
-do_error:
-       if (copied)
-               goto out;
-out_err:
-       return sk_stream_error(sk, flags, err);
-}
-
-ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
-                    size_t size, int flags)
-{
-       ssize_t res;
-       struct sock *sk = sock->sk;
-
-       if (!(sk->sk_route_caps & NETIF_F_SG) ||
-           !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
-               return sock_no_sendpage(sock, page, offset, size, flags);
-
-       lock_sock(sk);
-       TCP_CHECK_TIMER(sk);
-       res = do_tcp_sendpages(sk, &page, offset, size, flags);
-       TCP_CHECK_TIMER(sk);
-       release_sock(sk);
-       return res;
-}
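-
-/* Illustrative sketch (not part of the original file): do_tcp_sendpages()
- * above is what services sendfile(2) on a TCP socket, roughly as below.
- * 'my_send_file' is a hypothetical helper.
- */
-#if 0  /* userspace, not kernel code */
-#include <sys/sendfile.h>
-
-static ssize_t my_send_file(int sock_fd, int file_fd, size_t count)
-{
-       off_t off = 0;
-
-       /* zero-copy page path when the NIC has SG + checksum offload;
-        * otherwise the sock_no_sendpage() fallback above copies */
-       return sendfile(sock_fd, file_fd, &off, count);
-}
-#endif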
-
-#define TCP_PAGE(sk)   (sk->sk_sndmsg_page)
-#define TCP_OFF(sk)    (sk->sk_sndmsg_off)
-
-static inline int select_size(struct sock *sk, struct tcp_sock *tp)
-{
-       int tmp = tp->mss_cache;
-
-       if (sk->sk_route_caps & NETIF_F_SG) {
-               if (sk->sk_route_caps & NETIF_F_TSO)
-                       tmp = 0;
-               else {
-                       int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
-
-                       if (tmp >= pgbreak &&
-                           tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
-                               tmp = pgbreak;
-               }
-       }
-
-       return tmp;
-}
-
-int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-               size_t size)
-{
-       struct iovec *iov;
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct sk_buff *skb;
-       int iovlen, flags;
-       int mss_now, size_goal;
-       int err, copied;
-       long timeo;
-
-       lock_sock(sk);
-       TCP_CHECK_TIMER(sk);
-
-       flags = msg->msg_flags;
-       timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
-
-       /* Wait for a connection to finish. */
-       if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
-               if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
-                       goto out_err;
-
-       /* This should be in poll */
-       clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
-
-       mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
-       size_goal = tp->xmit_size_goal;
-
-       /* Ok commence sending. */
-       iovlen = msg->msg_iovlen;
-       iov = msg->msg_iov;
-       copied = 0;
-
-       err = -EPIPE;
-       if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
-               goto do_error;
-
-       while (--iovlen >= 0) {
-               int seglen = iov->iov_len;
-               unsigned char __user *from = iov->iov_base;
-
-               iov++;
-
-               while (seglen > 0) {
-                       int copy;
-
-                       skb = sk->sk_write_queue.prev;
-
-                       if (!sk->sk_send_head ||
-                           (copy = size_goal - skb->len) <= 0) {
-
-new_segment:
-                               /* Allocate new segment. If the interface is SG,
-                                * allocate skb fitting to single page.
-                                */
-                               if (!sk_stream_memory_free(sk))
-                                       goto wait_for_sndbuf;
-
-                               skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
-                                                          0, sk->sk_allocation);
-                               if (!skb)
-                                       goto wait_for_memory;
-
-                               /*
-                                * Check whether we can use HW checksum.
-                                */
-                               if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
-                                       skb->ip_summed = CHECKSUM_HW;
-
-                               skb_entail(sk, tp, skb);
-                               copy = size_goal;
-                       }
-
-                       /* Try to append data to the end of skb. */
-                       if (copy > seglen)
-                               copy = seglen;
-
-                       /* Where to copy to? */
-                       if (skb_tailroom(skb) > 0) {
-                               /* We have some space in skb head. Superb! */
-                               if (copy > skb_tailroom(skb))
-                                       copy = skb_tailroom(skb);
-                               if ((err = skb_add_data(skb, from, copy)) != 0)
-                                       goto do_fault;
-                       } else {
-                               int merge = 0;
-                               int i = skb_shinfo(skb)->nr_frags;
-                               struct page *page = TCP_PAGE(sk);
-                               int off = TCP_OFF(sk);
-
-                               if (skb_can_coalesce(skb, i, page, off) &&
-                                   off != PAGE_SIZE) {
-                                       /* We can extend the last page
-                                        * fragment. */
-                                       merge = 1;
-                               } else if (i == MAX_SKB_FRAGS ||
-                                          (!i &&
-                                          !(sk->sk_route_caps & NETIF_F_SG))) {
-                                       /* Need to add new fragment and cannot
-                                        * do this because interface is non-SG,
-                                        * or because all the page slots are
-                                        * busy. */
-                                       tcp_mark_push(tp, skb);
-                                       goto new_segment;
-                               } else if (page) {
-                                       if (off == PAGE_SIZE) {
-                                               put_page(page);
-                                               TCP_PAGE(sk) = page = NULL;
-                                               off = 0;
-                                       }
-                               } else
-                                       off = 0;
-
-                               if (copy > PAGE_SIZE - off)
-                                       copy = PAGE_SIZE - off;
-
-                               if (!sk_stream_wmem_schedule(sk, copy))
-                                       goto wait_for_memory;
-
-                               if (!page) {
-                                       /* Allocate new cache page. */
-                                       if (!(page = sk_stream_alloc_page(sk)))
-                                               goto wait_for_memory;
-                               }
-
-                               /* Time to copy data. We are close to
-                                * the end! */
-                               err = skb_copy_to_page(sk, from, skb, page,
-                                                      off, copy);
-                               if (err) {
-                                       /* If this page was new, give it to the
-                                        * socket so it does not get leaked.
-                                        */
-                                       if (!TCP_PAGE(sk)) {
-                                               TCP_PAGE(sk) = page;
-                                               TCP_OFF(sk) = 0;
-                                       }
-                                       goto do_error;
-                               }
-
-                               /* Update the skb. */
-                               if (merge) {
-                                       skb_shinfo(skb)->frags[i - 1].size +=
-                                                                       copy;
-                               } else {
-                                       skb_fill_page_desc(skb, i, page, off, copy);
-                                       if (TCP_PAGE(sk)) {
-                                               get_page(page);
-                                       } else if (off + copy < PAGE_SIZE) {
-                                               get_page(page);
-                                               TCP_PAGE(sk) = page;
-                                       }
-                               }
-
-                               TCP_OFF(sk) = off + copy;
-                       }
-
-                       if (!copied)
-                               TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
-
-                       tp->write_seq += copy;
-                       TCP_SKB_CB(skb)->end_seq += copy;
-                       skb_shinfo(skb)->gso_segs = 0;
-
-                       from += copy;
-                       copied += copy;
-                       if ((seglen -= copy) == 0 && iovlen == 0)
-                               goto out;
-
-                       if (skb->len < mss_now || (flags & MSG_OOB))
-                               continue;
-
-                       if (forced_push(tp)) {
-                               tcp_mark_push(tp, skb);
-                               __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
-                       } else if (skb == sk->sk_send_head)
-                               tcp_push_one(sk, mss_now);
-                       continue;
-
-wait_for_sndbuf:
-                       set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-wait_for_memory:
-                       if (copied)
-                               tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
-
-                       if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
-                               goto do_error;
-
-                       mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
-                       size_goal = tp->xmit_size_goal;
-               }
-       }
-
-out:
-       if (copied)
-               tcp_push(sk, tp, flags, mss_now, tp->nonagle);
-       TCP_CHECK_TIMER(sk);
-       release_sock(sk);
-       return copied;
-
-do_fault:
-       if (!skb->len) {
-               if (sk->sk_send_head == skb)
-                       sk->sk_send_head = NULL;
-               __skb_unlink(skb, &sk->sk_write_queue);
-               sk_stream_free_skb(sk, skb);
-       }
-
-do_error:
-       if (copied)
-               goto out;
-out_err:
-       err = sk_stream_error(sk, flags, err);
-       TCP_CHECK_TIMER(sk);
-       release_sock(sk);
-       return err;
-}
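-
-/* Illustrative sketch (not part of the original file): the send-side
- * flags tcp_sendmsg() honours above, seen from userspace.  MSG_MORE
- * keeps the final partial frame corked (the TCP_NAGLE_CORK path) and
- * MSG_OOB marks urgent data via tcp_mark_urg().  'my_send_two_parts'
- * is a hypothetical helper.
- */
-#if 0  /* userspace, not kernel code */
-#include <sys/socket.h>
-
-static void my_send_two_parts(int fd, const void *hdr, size_t hlen,
-                             const void *body, size_t blen)
-{
-       send(fd, hdr, hlen, MSG_MORE);  /* hold back the partial frame */
-       send(fd, body, blen, 0);        /* complete and flush it */
-}
-#endif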
-
-/*
- *     Handle reading urgent data. BSD has very simple semantics for
- *     this, no blocking and very strange errors 8)
- */
-
-static int tcp_recv_urg(struct sock *sk, long timeo,
-                       struct msghdr *msg, int len, int flags,
-                       int *addr_len)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-
-       /* No URG data to read. */
-       if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
-           tp->urg_data == TCP_URG_READ)
-               return -EINVAL; /* Yes, this is right! */
-
-       if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
-               return -ENOTCONN;
-
-       if (tp->urg_data & TCP_URG_VALID) {
-               int err = 0;
-               char c = tp->urg_data;
-
-               if (!(flags & MSG_PEEK))
-                       tp->urg_data = TCP_URG_READ;
-
-               /* Read urgent data. */
-               msg->msg_flags |= MSG_OOB;
-
-               if (len > 0) {
-                       if (!(flags & MSG_TRUNC))
-                               err = memcpy_toiovec(msg->msg_iov, &c, 1);
-                       len = 1;
-               } else
-                       msg->msg_flags |= MSG_TRUNC;
-
-               return err ? -EFAULT : len;
-       }
-
-       if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
-               return 0;
-
-       /* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
-        * the available implementations agree in this case:
-        * this call should never block, independent of the
-        * blocking state of the socket.
-        * Mike <pall@rz.uni-karlsruhe.de>
-        */
-       return -EAGAIN;
-}
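-
-/* Illustrative sketch (not part of the original file): the userspace
- * counterpart of the urgent-data handling above.  With SO_OOBINLINE
- * clear, the single byte of urgent data is read out-of-band and, per
- * the comment above, the call never blocks.  'my_read_oob' is a
- * hypothetical helper.
- */
-#if 0  /* userspace, not kernel code */
-#include <sys/socket.h>
-
-static int my_read_oob(int fd)
-{
-       char c;
-
-       /* the -EAGAIN above surfaces here as errno == EAGAIN */
-       if (recv(fd, &c, 1, MSG_OOB) == 1)
-               return (unsigned char)c;
-       return -1;
-}
-#endif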
-
-/* Clean up the receive buffer for full frames taken by the user,
- * then send an ACK if necessary.  COPIED is the number of bytes
- * tcp_recvmsg has given to the user so far; it speeds up the
- * calculation of whether or not we must ACK for the sake of
- * a window update.
- */
-void cleanup_rbuf(struct sock *sk, int copied)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       int time_to_ack = 0;
-
-#if TCP_DEBUG
-       struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
-
-       BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
-#endif
-
-       if (inet_csk_ack_scheduled(sk)) {
-               const struct inet_connection_sock *icsk = inet_csk(sk);
-               /* Delayed ACKs frequently hit locked sockets during
-                * bulk receive. */
-               if (icsk->icsk_ack.blocked ||
-                   /* Once-per-two-segments ACK was not sent by tcp_input.c */
-                   tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
-                   /*
-                    * If this read emptied the read buffer, we send an ACK
-                    * when the connection is not bidirectional, the user
-                    * drained the receive buffer, and there was a small
-                    * segment in the queue.
-                    */
-                   (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
-                    !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
-                       time_to_ack = 1;
-       }
-
-       /* We send an ACK if we can now advertise a non-zero window
-        * which has been raised "significantly".
-        *
-        * Even if window raised up to infinity, do not send window open ACK
-        * in states, where we will not receive more. It is useless.
-        */
-       if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
-               __u32 rcv_window_now = tcp_receive_window(tp);
-
-               /* Optimize, __tcp_select_window() is not cheap. */
-               if (2*rcv_window_now <= tp->window_clamp) {
-                       __u32 new_window = __tcp_select_window(sk);
-
-                       /* Send an ACK now if this read freed lots of space
-                        * in our buffer.  We can advertise the new window
-                        * now if it is not less than the current one.
-                        * "Lots" means "at least twice" here.
-                        */
-                       if (new_window && new_window >= 2 * rcv_window_now)
-                               time_to_ack = 1;
-               }
-       }
-       if (time_to_ack)
-               tcp_send_ack(sk);
-}
-
-static void tcp_prequeue_process(struct sock *sk)
-{
-       struct sk_buff *skb;
-       struct tcp_sock *tp = tcp_sk(sk);
-
-       NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
-
-       /* RX process wants to run with disabled BHs, though it is not
-        * necessary */
-       local_bh_disable();
-       while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
-               sk->sk_backlog_rcv(sk, skb);
-       local_bh_enable();
-
-       /* Clear memory counter. */
-       tp->ucopy.memory = 0;
-}
-
-static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
-{
-       struct sk_buff *skb;
-       u32 offset;
-
-       skb_queue_walk(&sk->sk_receive_queue, skb) {
-               offset = seq - TCP_SKB_CB(skb)->seq;
-               if (skb->h.th->syn)
-                       offset--;
-               if (offset < skb->len || skb->h.th->fin) {
-                       *off = offset;
-                       return skb;
-               }
-       }
-       return NULL;
-}
-
-/*
- * This routine provides an alternative to tcp_recvmsg() for routines
- * that would like to handle copying from skbuffs directly in 'sendfile'
- * fashion.
- * Note:
- *     - It is assumed that the socket was locked by the caller.
- *     - The routine does not block.
- *     - At present, there is no support for reading OOB data
- *       or for 'peeking' the socket using this routine
- *       (although both would be easy to implement).
- */
-int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
-                 sk_read_actor_t recv_actor)
-{
-       struct sk_buff *skb;
-       struct tcp_sock *tp = tcp_sk(sk);
-       u32 seq = tp->copied_seq;
-       u32 offset;
-       int copied = 0;
-
-       if (sk->sk_state == TCP_LISTEN)
-               return -ENOTCONN;
-       while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
-               if (offset < skb->len) {
-                       size_t used, len;
-
-                       len = skb->len - offset;
-                       /* Stop reading if we hit a patch of urgent data */
-                       if (tp->urg_data) {
-                               u32 urg_offset = tp->urg_seq - seq;
-                               if (urg_offset < len)
-                                       len = urg_offset;
-                               if (!len)
-                                       break;
-                       }
-                       used = recv_actor(desc, skb, offset, len);
-                       if (used <= len) {
-                               seq += used;
-                               copied += used;
-                               offset += used;
-                       }
-                       if (offset != skb->len)
-                               break;
-               }
-               if (skb->h.th->fin) {
-                       sk_eat_skb(sk, skb);
-                       ++seq;
-                       break;
-               }
-               sk_eat_skb(sk, skb);
-               if (!desc->count)
-                       break;
-       }
-       tp->copied_seq = seq;
-
-       tcp_rcv_space_adjust(sk);
-
-       /* Clean up the data we have read: this will send ACK frames. */
-       if (copied)
-               cleanup_rbuf(sk, copied);
-       return copied;
-}
-
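A sketch of how a caller might drive tcp_read_sock(). The actor below is
hypothetical (count_bytes_actor is not in the kernel); it only shows the
contract: desc->count is the remaining budget, and the returned byte
count advances tp->copied_seq.

/* Hypothetical recv_actor: account for bytes without copying them. */
static int count_bytes_actor(read_descriptor_t *desc, struct sk_buff *skb,
                             unsigned int offset, size_t len)
{
        size_t take = min_t(size_t, len, desc->count);

        desc->count -= take;    /* consume the caller's budget  */
        desc->written += take;  /* running total for the caller */
        return take;
}

/* Usage (the socket must already be locked, per the note above):
 *
 *      read_descriptor_t desc = { .count = budget };
 *      int n = tcp_read_sock(sk, &desc, count_bytes_actor);
 */
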
-/*
- *     This routine copies from a sock struct into the user buffer.
- *
- *     Technical note: since 2.3 we work on a _locked_ socket, so
- *     tricks with *seq access order and skb->users are not required.
- *     The code can probably be improved even further.
- */
-
-int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-               size_t len, int nonblock, int flags, int *addr_len)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       int copied = 0;
-       u32 peek_seq;
-       u32 *seq;
-       unsigned long used;
-       int err;
-       int target;             /* Read at least this many bytes */
-       long timeo;
-       struct task_struct *user_recv = NULL;
-
-       lock_sock(sk);
-
-       TCP_CHECK_TIMER(sk);
-
-       err = -ENOTCONN;
-       if (sk->sk_state == TCP_LISTEN)
-               goto out;
-
-       timeo = sock_rcvtimeo(sk, nonblock);
-
-       /* Urgent data needs to be handled specially. */
-       if (flags & MSG_OOB)
-               goto recv_urg;
-
-       seq = &tp->copied_seq;
-       if (flags & MSG_PEEK) {
-               peek_seq = tp->copied_seq;
-               seq = &peek_seq;
-       }
-
-       target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
-
-       do {
-               struct sk_buff *skb;
-               u32 offset;
-
-               /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
-               if (tp->urg_data && tp->urg_seq == *seq) {
-                       if (copied)
-                               break;
-                       if (signal_pending(current)) {
-                               copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
-                               break;
-                       }
-               }
-
-               /* Next get a buffer. */
-
-               skb = skb_peek(&sk->sk_receive_queue);
-               do {
-                       if (!skb)
-                               break;
-
-                       /* Now that we have two receive queues this
-                        * shouldn't happen.
-                        */
-                       if (before(*seq, TCP_SKB_CB(skb)->seq)) {
-                               printk(KERN_INFO "recvmsg bug: copied %X "
-                                      "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
-                               break;
-                       }
-                       offset = *seq - TCP_SKB_CB(skb)->seq;
-                       if (skb->h.th->syn)
-                               offset--;
-                       if (offset < skb->len)
-                               goto found_ok_skb;
-                       if (skb->h.th->fin)
-                               goto found_fin_ok;
-                       BUG_TRAP(flags & MSG_PEEK);
-                       skb = skb->next;
-               } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
-
-               /* Well, if we have a backlog, try to process it now. */
-
-               if (copied >= target && !sk->sk_backlog.tail)
-                       break;
-
-               if (copied) {
-                       if (sk->sk_err ||
-                           sk->sk_state == TCP_CLOSE ||
-                           (sk->sk_shutdown & RCV_SHUTDOWN) ||
-                           !timeo ||
-                           signal_pending(current) ||
-                           (flags & MSG_PEEK))
-                               break;
-               } else {
-                       if (sock_flag(sk, SOCK_DONE))
-                               break;
-
-                       if (sk->sk_err) {
-                               copied = sock_error(sk);
-                               break;
-                       }
-
-                       if (sk->sk_shutdown & RCV_SHUTDOWN)
-                               break;
-
-                       if (sk->sk_state == TCP_CLOSE) {
-                               if (!sock_flag(sk, SOCK_DONE)) {
-                                       /* This occurs when the user tries to
-                                        * read from a socket that was never
-                                        * connected.
-                                        */
-                                       copied = -ENOTCONN;
-                                       break;
-                               }
-                               break;
-                       }
-
-                       if (!timeo) {
-                               copied = -EAGAIN;
-                               break;
-                       }
-
-                       if (signal_pending(current)) {
-                               copied = sock_intr_errno(timeo);
-                               break;
-                       }
-               }
-
-               cleanup_rbuf(sk, copied);
-
-               if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
-                       /* Install new reader */
-                       if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
-                               user_recv = current;
-                               tp->ucopy.task = user_recv;
-                               tp->ucopy.iov = msg->msg_iov;
-                       }
-
-                       tp->ucopy.len = len;
-
-                       BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
-                                (flags & (MSG_PEEK | MSG_TRUNC)));
-
-                       /* Ugly... If the prequeue is not empty, we have to
-                        * process it before releasing the socket, otherwise
-                        * ordering will be broken on the second iteration.
-                        * A more elegant solution is required!!!
-                        *
-                        * Look: we have the following (pseudo)queues:
-                        *
-                        * 1. packets in flight
-                        * 2. backlog
-                        * 3. prequeue
-                        * 4. receive_queue
-                        *
-                        * Each queue can be processed only if the next ones
-                        * are empty. At this point we have an empty
-                        * receive_queue. But the prequeue _can_ be non-empty
-                        * after the 2nd iteration, when we jumped to the
-                        * start of the loop because backlog processing added
-                        * something to the receive_queue. We cannot
-                        * release_sock(), because the backlog contains
-                        * packets that arrived _after_ the prequeued ones.
-                        *
-                        * In short, the algorithm is clear: process all the
-                        * queues in order. We could do it more directly,
-                        * requeueing packets from the backlog to the prequeue
-                        * if it is not empty. That is more elegant, but eats
-                        * cycles, unfortunately.
-                        */
-                       if (!skb_queue_empty(&tp->ucopy.prequeue))
-                               goto do_prequeue;
-
-                       /* __ Set realtime policy in scheduler __ */
-               }
-
-               if (copied >= target) {
-                       /* Do not sleep, just process backlog. */
-                       release_sock(sk);
-                       lock_sock(sk);
-               } else
-                       sk_wait_data(sk, &timeo);
-
-               if (user_recv) {
-                       int chunk;
-
-                       /* __ Restore normal policy in scheduler __ */
-
-                       if ((chunk = len - tp->ucopy.len) != 0) {
-                               NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
-                               len -= chunk;
-                               copied += chunk;
-                       }
-
-                       if (tp->rcv_nxt == tp->copied_seq &&
-                           !skb_queue_empty(&tp->ucopy.prequeue)) {
-do_prequeue:
-                               tcp_prequeue_process(sk);
-
-                               if ((chunk = len - tp->ucopy.len) != 0) {
-                                       NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
-                                       len -= chunk;
-                                       copied += chunk;
-                               }
-                       }
-               }
-               if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
-                       if (net_ratelimit())
-                               printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
-                                      current->comm, current->pid);
-                       peek_seq = tp->copied_seq;
-               }
-               continue;
-
-       found_ok_skb:
-               /* Ok so how much can we use? */
-               used = skb->len - offset;
-               if (len < used)
-                       used = len;
-
-               /* Do we have urgent data here? */
-               if (tp->urg_data) {
-                       u32 urg_offset = tp->urg_seq - *seq;
-                       if (urg_offset < used) {
-                               if (!urg_offset) {
-                                       if (!sock_flag(sk, SOCK_URGINLINE)) {
-                                               ++*seq;
-                                               offset++;
-                                               used--;
-                                               if (!used)
-                                                       goto skip_copy;
-                                       }
-                               } else
-                                       used = urg_offset;
-                       }
-               }
-
-               if (!(flags & MSG_TRUNC)) {
-                       err = skb_copy_datagram_iovec(skb, offset,
-                                                     msg->msg_iov, used);
-                       if (err) {
-                               /* Exception. Bailout! */
-                               if (!copied)
-                                       copied = -EFAULT;
-                               break;
-                       }
-               }
-
-               *seq += used;
-               copied += used;
-               len -= used;
-
-               tcp_rcv_space_adjust(sk);
-
-skip_copy:
-               if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
-                       tp->urg_data = 0;
-                       tcp_fast_path_check(sk, tp);
-               }
-               if (used + offset < skb->len)
-                       continue;
-
-               if (skb->h.th->fin)
-                       goto found_fin_ok;
-               if (!(flags & MSG_PEEK))
-                       sk_eat_skb(sk, skb);
-               continue;
-
-       found_fin_ok:
-               /* Process the FIN. */
-               ++*seq;
-               if (!(flags & MSG_PEEK))
-                       sk_eat_skb(sk, skb);
-               break;
-       } while (len > 0);
-
-       if (user_recv) {
-               if (!skb_queue_empty(&tp->ucopy.prequeue)) {
-                       int chunk;
-
-                       tp->ucopy.len = copied > 0 ? len : 0;
-
-                       tcp_prequeue_process(sk);
-
-                       if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
-                               NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
-                               len -= chunk;
-                               copied += chunk;
-                       }
-               }
-
-               tp->ucopy.task = NULL;
-               tp->ucopy.len = 0;
-       }
-
-       /* According to UNIX98, msg_name/msg_namelen are ignored
-        * on a connected socket. I was just happy when I found this 8) --ANK
-        */
-
-       /* Clean up the data we have read: this will send ACK frames. */
-       cleanup_rbuf(sk, copied);
-
-       TCP_CHECK_TIMER(sk);
-       release_sock(sk);
-       return copied;
-
-out:
-       TCP_CHECK_TIMER(sk);
-       release_sock(sk);
-       return err;
-
-recv_urg:
-       err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
-       goto out;
-}
-
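The peek machinery above (peek_seq, and the MSG_PEEK race warning) backs
the ordinary userspace pattern of peeking at a header before consuming
the whole record. A sketch, with an invented one-byte length prefix:

#include <sys/types.h>
#include <sys/socket.h>

/* Peek at a 1-byte length prefix without consuming it, then read the
 * full frame; MSG_WAITALL raises the 'target' used by tcp_recvmsg()
 * so the second call blocks until the whole frame has arrived. */
static ssize_t read_frame(int fd, char *buf, size_t cap)
{
        unsigned char hdr;

        if (recv(fd, &hdr, 1, MSG_PEEK) != 1)
                return -1;
        if ((size_t)hdr + 1 > cap)
                return -1;
        return recv(fd, buf, (size_t)hdr + 1, MSG_WAITALL);
}
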
-/*
- *     State processing on a close. This implements the state shift for
- *     sending our FIN frame. Note that we only send a FIN for some
- *     states. A shutdown() may have already sent the FIN, or we may be
- *     closed.
- */
-
-static const unsigned char new_state[16] = {
-  /* current state:        new state:      action:     */
-  /* (Invalid)         */ TCP_CLOSE,
-  /* TCP_ESTABLISHED   */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
-  /* TCP_SYN_SENT      */ TCP_CLOSE,
-  /* TCP_SYN_RECV      */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
-  /* TCP_FIN_WAIT1     */ TCP_FIN_WAIT1,
-  /* TCP_FIN_WAIT2     */ TCP_FIN_WAIT2,
-  /* TCP_TIME_WAIT     */ TCP_CLOSE,
-  /* TCP_CLOSE         */ TCP_CLOSE,
-  /* TCP_CLOSE_WAIT    */ TCP_LAST_ACK  | TCP_ACTION_FIN,
-  /* TCP_LAST_ACK      */ TCP_LAST_ACK,
-  /* TCP_LISTEN        */ TCP_CLOSE,
-  /* TCP_CLOSING       */ TCP_CLOSING,
-};
-
-static int tcp_close_state(struct sock *sk)
-{
-       int next = (int)new_state[sk->sk_state];
-       int ns = next & TCP_STATE_MASK;
-
-       tcp_set_state(sk, ns);
-
-       return next & TCP_ACTION_FIN;
-}
-
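The table packs the next state and an action flag into a single byte;
TCP_STATE_MASK strips the flag. A hedged userspace illustration of the
same encoding trick (the constants here are illustrative, not the
kernel's values):

#include <stdio.h>

#define STATE_MASK 0x0f   /* low bits: next state (illustrative) */
#define ACTION_FIN 0x10   /* flag bit: "a FIN must be sent now"  */

enum { ST_ESTABLISHED = 1, ST_FIN_WAIT1 = 4, ST_CLOSE = 7 };

static const unsigned char next_on_close[8] = {
        [ST_ESTABLISHED] = ST_FIN_WAIT1 | ACTION_FIN,
        [ST_CLOSE]       = ST_CLOSE,
};

int main(void)
{
        int packed = next_on_close[ST_ESTABLISHED];

        printf("next=%d send_fin=%d\n",
               packed & STATE_MASK, !!(packed & ACTION_FIN));
        return 0;
}
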
-/*
- *     Shutdown the sending side of a connection. Much like close except
- *     that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
- */
-
-void tcp_shutdown(struct sock *sk, int how)
-{
-       /*      We need to grab some memory, and put together a FIN,
-        *      and then put it into the queue to be sent.
-        *              Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
-        */
-       if (!(how & SEND_SHUTDOWN))
-               return;
-
-       /* If we've already sent a FIN, or it's a closed state, skip this. */
-       if ((1 << sk->sk_state) &
-           (TCPF_ESTABLISHED | TCPF_SYN_SENT |
-            TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
-               /* Clear out any half completed packets.  FIN if needed. */
-               if (tcp_close_state(sk))
-                       tcp_send_fin(sk);
-       }
-}
-
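From userspace this path is reached through shutdown(2) with SHUT_WR; a
minimal sketch:

#include <sys/socket.h>

/* Half-close: after the application has written everything it intends
 * to send, queue a FIN via tcp_shutdown() while leaving the receive
 * side open for the peer's remaining data. */
static int finish_sending(int fd)
{
        return shutdown(fd, SHUT_WR);
}
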
-void tcp_close(struct sock *sk, long timeout)
-{
-       struct sk_buff *skb;
-       int data_was_unread = 0;
-       int state;
-
-       lock_sock(sk);
-       sk->sk_shutdown = SHUTDOWN_MASK;
-
-       if (sk->sk_state == TCP_LISTEN) {
-               tcp_set_state(sk, TCP_CLOSE);
-
-               /* Special case. */
-               inet_csk_listen_stop(sk);
-
-               goto adjudge_to_death;
-       }
-
-       /*  We need to flush the recv. buffs.  We do this only on the
-        *  descriptor close, not protocol-sourced closes, because the
-        *  reader process may not have drained the data yet!
-        */
-       while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-               u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
-                         skb->h.th->fin;
-               data_was_unread += len;
-               __kfree_skb(skb);
-       }
-
-       sk_stream_mem_reclaim(sk);
-
-       /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
-        * 3.10, we send a RST here because data was lost.  To
-        * witness the awful effects of the old behavior of always
-        * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
-        * a bulk GET in an FTP client, suspend the process, wait
-        * for the client to advertise a zero window, then kill -9
-        * the FTP client, wheee...  Note: timeout is always zero
-        * in such a case.
-        */
-       if (data_was_unread) {
-               /* Unread data was tossed, zap the connection. */
-               NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
-               tcp_set_state(sk, TCP_CLOSE);
-               tcp_send_active_reset(sk, GFP_KERNEL);
-       } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
-               /* Check zero linger _after_ checking for unread data. */
-               sk->sk_prot->disconnect(sk, 0);
-               NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
-       } else if (tcp_close_state(sk)) {
-               /* We FIN if the application ate all the data before
-                * zapping the connection.
-                */
-
-               /* RED-PEN. Formally speaking, we have broken the TCP state
-                * machine. State transitions:
-                *
-                * TCP_ESTABLISHED -> TCP_FIN_WAIT1
-                * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
-                * TCP_CLOSE_WAIT -> TCP_LAST_ACK
-                *
-                * are legal only when the FIN has been sent (i.e. in window),
-                * rather than queued out of window. Purists blame.
-                *
-                * E.g. the "RFC state" is ESTABLISHED if the Linux state is
-                * FIN-WAIT-1 but the FIN has still not been sent.
-                *
-                * The visible deviations are that we sometimes enter the
-                * time-wait state when it is not really required (harmless),
-                * and do not send active resets when they are required by
-                * the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when they look
-                * like CLOSING or LAST_ACK to Linux).
-                * Probably, I missed some more corner cases.
-                *                                              --ANK
-                */
-               tcp_send_fin(sk);
-       }
-
-       sk_stream_wait_close(sk, timeout);
-
-adjudge_to_death:
-       state = sk->sk_state;
-       sock_hold(sk);
-       sock_orphan(sk);
-       atomic_inc(sk->sk_prot->orphan_count);
-
-       /* This is the last release_sock() in its life. It will remove the backlog. */
-       release_sock(sk);
-
-
-       /* Now the socket is owned by the kernel and we acquire the BH lock
-        * to finish the close. No need to check for user refs.
-        */
-       local_bh_disable();
-       bh_lock_sock(sk);
-       BUG_TRAP(!sock_owned_by_user(sk));
-
-       /* Have we already been destroyed by a softirq or backlog? */
-       if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
-               goto out;
-
-       /*      This is a (useful) BSD violation of the RFC. There is a
-        *      problem with TCP as specified in that the other end could
-        *      keep a socket open forever with no application left at this
-        *      end. We use a 3 minute timeout (about the same as BSD) and
-        *      then kill our end. If they send after that then tough - BUT:
-        *      long enough that we won't make the old "4*rto = almost no
-        *      time - whoops reset" mistake.
-        *
-        *      Nope, it was not a mistake. It is really the desired
-        *      behaviour, e.g. on HTTP servers, where such sockets are
-        *      useless but consume significant resources. Let's handle it
-        *      with the special linger2 option.                --ANK
-        */
-
-       if (sk->sk_state == TCP_FIN_WAIT2) {
-               struct tcp_sock *tp = tcp_sk(sk);
-               if (tp->linger2 < 0) {
-                       tcp_set_state(sk, TCP_CLOSE);
-                       tcp_send_active_reset(sk, GFP_ATOMIC);
-                       NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
-               } else {
-                       const int tmo = tcp_fin_time(sk);
-
-                       if (tmo > TCP_TIMEWAIT_LEN) {
-                               inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
-                       } else {
-                               tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
-                               goto out;
-                       }
-               }
-       }
-       if (sk->sk_state != TCP_CLOSE) {
-               sk_stream_mem_reclaim(sk);
-               if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
-                   (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-                    atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
-                       if (net_ratelimit())
-                               printk(KERN_INFO "TCP: too many orphaned "
-                                      "sockets\n");
-                       tcp_set_state(sk, TCP_CLOSE);
-                       tcp_send_active_reset(sk, GFP_ATOMIC);
-                       NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
-               }
-       }
-
-       if (sk->sk_state == TCP_CLOSE)
-               inet_csk_destroy_sock(sk);
-       /* Otherwise, socket is reprieved until protocol close. */
-
-out:
-       bh_unlock_sock(sk);
-       local_bh_enable();
-       sock_put(sk);
-}
-
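The zero-linger branch above (disconnect plus TCPABORTONDATA) is what a
userspace abortive close triggers; a sketch:

#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* With l_onoff set and l_linger zero, close() takes the
 * SOCK_LINGER && !sk_lingertime branch of tcp_close() and the
 * connection is reset rather than FIN-closed. */
static int abortive_close(int fd)
{
        struct linger lg;

        memset(&lg, 0, sizeof(lg));
        lg.l_onoff = 1;
        lg.l_linger = 0;
        if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg)) < 0)
                return -1;
        return close(fd);
}
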
-/* These states need RST on ABORT according to RFC793 */
-
-static inline int tcp_need_reset(int state)
-{
-       return (1 << state) &
-              (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
-               TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
-}
-
-int tcp_disconnect(struct sock *sk, int flags)
-{
-       struct inet_sock *inet = inet_sk(sk);
-       struct inet_connection_sock *icsk = inet_csk(sk);
-       struct tcp_sock *tp = tcp_sk(sk);
-       int err = 0;
-       int old_state = sk->sk_state;
-
-       if (old_state != TCP_CLOSE)
-               tcp_set_state(sk, TCP_CLOSE);
-
-       /* ABORT function of RFC793 */
-       if (old_state == TCP_LISTEN) {
-               inet_csk_listen_stop(sk);
-       } else if (tcp_need_reset(old_state) ||
-                  (tp->snd_nxt != tp->write_seq &&
-                   (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
-               /* The last check adjusts for the discrepancy between Linux
-                * and the RFC states.
-                */
-               tcp_send_active_reset(sk, gfp_any());
-               sk->sk_err = ECONNRESET;
-       } else if (old_state == TCP_SYN_SENT)
-               sk->sk_err = ECONNRESET;
-
-       tcp_clear_xmit_timers(sk);
-       __skb_queue_purge(&sk->sk_receive_queue);
-       sk_stream_writequeue_purge(sk);
-       __skb_queue_purge(&tp->out_of_order_queue);
-
-       inet->dport = 0;
-
-       if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
-               inet_reset_saddr(sk);
-
-       sk->sk_shutdown = 0;
-       sock_reset_flag(sk, SOCK_DONE);
-       tp->srtt = 0;
-       if ((tp->write_seq += tp->max_window + 2) == 0)
-               tp->write_seq = 1;
-       icsk->icsk_backoff = 0;
-       tp->snd_cwnd = 2;
-       icsk->icsk_probes_out = 0;
-       tp->packets_out = 0;
-       tp->snd_ssthresh = 0x7fffffff;
-       tp->snd_cwnd_cnt = 0;
-       tp->bytes_acked = 0;
-       tcp_set_ca_state(sk, TCP_CA_Open);
-       tcp_clear_retrans(tp);
-       inet_csk_delack_init(sk);
-       sk->sk_send_head = NULL;
-       tp->rx_opt.saw_tstamp = 0;
-       tcp_sack_reset(&tp->rx_opt);
-       __sk_dst_reset(sk);
-
-       BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
-
-       sk->sk_error_report(sk);
-       return err;
-}
-
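tcp_disconnect() is reachable from userspace by connect()ing an
established TCP socket to an address with family AF_UNSPEC, which
dissolves the association so the descriptor can be reused; a sketch:

#include <string.h>
#include <sys/socket.h>

static int tcp_dissolve(int fd)
{
        struct sockaddr sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_family = AF_UNSPEC;   /* triggers the disconnect path */
        return connect(fd, &sa, sizeof(sa));
}
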
-/*
- *     Socket option code for TCP.
- */
-static int do_tcp_setsockopt(struct sock *sk, int level,
-               int optname, char __user *optval, int optlen)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct inet_connection_sock *icsk = inet_csk(sk);
-       int val;
-       int err = 0;
-
-       /* This is a string value; all the others are ints. */
-       if (optname == TCP_CONGESTION) {
-               char name[TCP_CA_NAME_MAX];
-
-               if (optlen < 1)
-                       return -EINVAL;
-
-               val = strncpy_from_user(name, optval,
-                                       min(TCP_CA_NAME_MAX-1, optlen));
-               if (val < 0)
-                       return -EFAULT;
-               name[val] = 0;
-
-               lock_sock(sk);
-               err = tcp_set_congestion_control(sk, name);
-               release_sock(sk);
-               return err;
-       }
-
-       if (optlen < sizeof(int))
-               return -EINVAL;
-
-       if (get_user(val, (int __user *)optval))
-               return -EFAULT;
-
-       lock_sock(sk);
-
-       switch (optname) {
-       case TCP_MAXSEG:
-               /* Values greater than the interface MTU won't take effect.
-                * However, at the point when this call is made we typically
-                * don't yet know which interface is going to be used. */
-               if (val < 8 || val > MAX_TCP_WINDOW) {
-                       err = -EINVAL;
-                       break;
-               }
-               tp->rx_opt.user_mss = val;
-               break;
-
-       case TCP_NODELAY:
-               if (val) {
-                       /* TCP_NODELAY is weaker than TCP_CORK, so setting
-                        * this option on a corked socket is remembered, but
-                        * it is not activated until the cork is cleared.
-                        *
-                        * However, when TCP_NODELAY is set we make
-                        * an explicit push, which overrides even TCP_CORK
-                        * for currently queued segments.
-                        */
-                       tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
-                       tcp_push_pending_frames(sk, tp);
-               } else {
-                       tp->nonagle &= ~TCP_NAGLE_OFF;
-               }
-               break;
-
-       case TCP_CORK:
-               /* When set, this indicates that non-full frames should
-                * always be queued. Later the user clears the option and
-                * we transmit any pending partial frames in the queue.
-                * This is meant to be used alongside sendfile() to get
-                * properly filled frames when the user (for example) must
-                * write out headers with a write() call first and then use
-                * sendfile() to send out the data parts.
-                *
-                * TCP_CORK can be set together with TCP_NODELAY and it is
-                * stronger than TCP_NODELAY.
-                */
-               if (val) {
-                       tp->nonagle |= TCP_NAGLE_CORK;
-               } else {
-                       tp->nonagle &= ~TCP_NAGLE_CORK;
-                       if (tp->nonagle&TCP_NAGLE_OFF)
-                               tp->nonagle |= TCP_NAGLE_PUSH;
-                       tcp_push_pending_frames(sk, tp);
-               }
-               break;
-
-       case TCP_KEEPIDLE:
-               if (val < 1 || val > MAX_TCP_KEEPIDLE)
-                       err = -EINVAL;
-               else {
-                       tp->keepalive_time = val * HZ;
-                       if (sock_flag(sk, SOCK_KEEPOPEN) &&
-                           !((1 << sk->sk_state) &
-                             (TCPF_CLOSE | TCPF_LISTEN))) {
-                               __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
-                               if (tp->keepalive_time > elapsed)
-                                       elapsed = tp->keepalive_time - elapsed;
-                               else
-                                       elapsed = 0;
-                               inet_csk_reset_keepalive_timer(sk, elapsed);
-                       }
-               }
-               break;
-       case TCP_KEEPINTVL:
-               if (val < 1 || val > MAX_TCP_KEEPINTVL)
-                       err = -EINVAL;
-               else
-                       tp->keepalive_intvl = val * HZ;
-               break;
-       case TCP_KEEPCNT:
-               if (val < 1 || val > MAX_TCP_KEEPCNT)
-                       err = -EINVAL;
-               else
-                       tp->keepalive_probes = val;
-               break;
-       case TCP_SYNCNT:
-               if (val < 1 || val > MAX_TCP_SYNCNT)
-                       err = -EINVAL;
-               else
-                       icsk->icsk_syn_retries = val;
-               break;
-
-       case TCP_LINGER2:
-               if (val < 0)
-                       tp->linger2 = -1;
-               else if (val > sysctl_tcp_fin_timeout / HZ)
-                       tp->linger2 = 0;
-               else
-                       tp->linger2 = val * HZ;
-               break;
-
-       case TCP_DEFER_ACCEPT:
-               icsk->icsk_accept_queue.rskq_defer_accept = 0;
-               if (val > 0) {
-                       /* Translate the value in seconds into a number of
-                        * retransmits. */
-                       while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
-                              val > ((TCP_TIMEOUT_INIT / HZ) <<
-                                      icsk->icsk_accept_queue.rskq_defer_accept))
-                               icsk->icsk_accept_queue.rskq_defer_accept++;
-                       icsk->icsk_accept_queue.rskq_defer_accept++;
-               }
-               break;
-
-       case TCP_WINDOW_CLAMP:
-               if (!val) {
-                       if (sk->sk_state != TCP_CLOSE) {
-                               err = -EINVAL;
-                               break;
-                       }
-                       tp->window_clamp = 0;
-               } else
-                       tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
-                                               SOCK_MIN_RCVBUF / 2 : val;
-               break;
-
-       case TCP_QUICKACK:
-               if (!val) {
-                       icsk->icsk_ack.pingpong = 1;
-               } else {
-                       icsk->icsk_ack.pingpong = 0;
-                       if ((1 << sk->sk_state) &
-                           (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
-                           inet_csk_ack_scheduled(sk)) {
-                               icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
-                               cleanup_rbuf(sk, 1);
-                               if (!(val & 1))
-                                       icsk->icsk_ack.pingpong = 1;
-                       }
-               }
-               break;
-
-       default:
-               err = -ENOPROTOOPT;
-               break;
-       }
-       release_sock(sk);
-       return err;
-}
-
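The cork/uncork dance described under TCP_CORK looks like this from
userspace (a sketch with error handling elided; the header/payload split
mirrors the sendfile() use case named in the comment):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <unistd.h>

/* Cork, write the headers, sendfile() the body, then uncork so any
 * pending partial frame is pushed out ("user clears this option"). */
static void send_response(int fd, const char *hdr, size_t hlen,
                          int file_fd, off_t flen)
{
        int on = 1, off = 0;

        setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
        write(fd, hdr, hlen);
        sendfile(fd, file_fd, NULL, flen);
        setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}
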
-int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
-                  int optlen)
-{
-       struct inet_connection_sock *icsk = inet_csk(sk);
-
-       if (level != SOL_TCP)
-               return icsk->icsk_af_ops->setsockopt(sk, level, optname,
-                                                    optval, optlen);
-       return do_tcp_setsockopt(sk, level, optname, optval, optlen);
-}
-
-#ifdef CONFIG_COMPAT
-int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, int optlen)
-{
-       if (level != SOL_TCP)
-               return inet_csk_compat_setsockopt(sk, level, optname,
-                                                 optval, optlen);
-       return do_tcp_setsockopt(sk, level, optname, optval, optlen);
-}
-
-EXPORT_SYMBOL(compat_tcp_setsockopt);
-#endif
-
-/* Return information about the state of the TCP endpoint, in API format. */
-void tcp_get_info(struct sock *sk, struct tcp_info *info)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       const struct inet_connection_sock *icsk = inet_csk(sk);
-       u32 now = tcp_time_stamp;
-
-       memset(info, 0, sizeof(*info));
-
-       info->tcpi_state = sk->sk_state;
-       info->tcpi_ca_state = icsk->icsk_ca_state;
-       info->tcpi_retransmits = icsk->icsk_retransmits;
-       info->tcpi_probes = icsk->icsk_probes_out;
-       info->tcpi_backoff = icsk->icsk_backoff;
-
-       if (tp->rx_opt.tstamp_ok)
-               info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
-       if (tp->rx_opt.sack_ok)
-               info->tcpi_options |= TCPI_OPT_SACK;
-       if (tp->rx_opt.wscale_ok) {
-               info->tcpi_options |= TCPI_OPT_WSCALE;
-               info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
-               info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
-       }
-
-       if (tp->ecn_flags&TCP_ECN_OK)
-               info->tcpi_options |= TCPI_OPT_ECN;
-
-       info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
-       info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
-       info->tcpi_snd_mss = tp->mss_cache;
-       info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
-
-       info->tcpi_unacked = tp->packets_out;
-       info->tcpi_sacked = tp->sacked_out;
-       info->tcpi_lost = tp->lost_out;
-       info->tcpi_retrans = tp->retrans_out;
-       info->tcpi_fackets = tp->fackets_out;
-
-       info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
-       info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
-       info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
-
-       info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
-       info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
-       info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
-       info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
-       info->tcpi_snd_ssthresh = tp->snd_ssthresh;
-       info->tcpi_snd_cwnd = tp->snd_cwnd;
-       info->tcpi_advmss = tp->advmss;
-       info->tcpi_reordering = tp->reordering;
-
-       info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
-       info->tcpi_rcv_space = tp->rcvq_space.space;
-
-       info->tcpi_total_retrans = tp->total_retrans;
-}
-
-EXPORT_SYMBOL_GPL(tcp_get_info);
-
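Userspace reads this structure with getsockopt(TCP_INFO); a sketch
printing a few of the fields filled in above (tcpi_rtt and tcpi_rttvar
are in microseconds, per the jiffies_to_usecs() conversions):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

static void print_tcp_info(int fd)
{
        struct tcp_info info;
        socklen_t len = sizeof(info);

        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
                printf("rtt=%uus rttvar=%uus cwnd=%u retrans=%u\n",
                       info.tcpi_rtt, info.tcpi_rttvar,
                       info.tcpi_snd_cwnd, info.tcpi_total_retrans);
}
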
-static int do_tcp_getsockopt(struct sock *sk, int level,
-               int optname, char __user *optval, int __user *optlen)
-{
-       struct inet_connection_sock *icsk = inet_csk(sk);
-       struct tcp_sock *tp = tcp_sk(sk);
-       int val, len;
-
-       if (get_user(len, optlen))
-               return -EFAULT;
-
-       len = min_t(unsigned int, len, sizeof(int));
-
-       if (len < 0)
-               return -EINVAL;
-
-       switch (optname) {
-       case TCP_MAXSEG:
-               val = tp->mss_cache;
-               if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
-                       val = tp->rx_opt.user_mss;
-               break;
-       case TCP_NODELAY:
-               val = !!(tp->nonagle&TCP_NAGLE_OFF);
-               break;
-       case TCP_CORK:
-               val = !!(tp->nonagle&TCP_NAGLE_CORK);
-               break;
-       case TCP_KEEPIDLE:
-               val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
-               break;
-       case TCP_KEEPINTVL:
-               val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
-               break;
-       case TCP_KEEPCNT:
-               val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
-               break;
-       case TCP_SYNCNT:
-               val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
-               break;
-       case TCP_LINGER2:
-               val = tp->linger2;
-               if (val >= 0)
-                       val = (val ? : sysctl_tcp_fin_timeout) / HZ;
-               break;
-       case TCP_DEFER_ACCEPT:
-               val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
-                       ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
-               break;
-       case TCP_WINDOW_CLAMP:
-               val = tp->window_clamp;
-               break;
-       case TCP_INFO: {
-               struct tcp_info info;
-
-               if (get_user(len, optlen))
-                       return -EFAULT;
-
-               tcp_get_info(sk, &info);
-
-               len = min_t(unsigned int, len, sizeof(info));
-               if (put_user(len, optlen))
-                       return -EFAULT;
-               if (copy_to_user(optval, &info, len))
-                       return -EFAULT;
-               return 0;
-       }
-       case TCP_QUICKACK:
-               val = !icsk->icsk_ack.pingpong;
-               break;
-
-       case TCP_CONGESTION:
-               if (get_user(len, optlen))
-                       return -EFAULT;
-               len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
-               if (put_user(len, optlen))
-                       return -EFAULT;
-               if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
-                       return -EFAULT;
-               return 0;
-       default:
-               return -ENOPROTOOPT;
-       }
-
-       if (put_user(len, optlen))
-               return -EFAULT;
-       if (copy_to_user(optval, &val, len))
-               return -EFAULT;
-       return 0;
-}
-
-int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
-                  int __user *optlen)
-{
-       struct inet_connection_sock *icsk = inet_csk(sk);
-
-       if (level != SOL_TCP)
-               return icsk->icsk_af_ops->getsockopt(sk, level, optname,
-                                                    optval, optlen);
-       return do_tcp_getsockopt(sk, level, optname, optval, optlen);
-}
-
-#ifdef CONFIG_COMPAT
-int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, int __user *optlen)
-{
-       if (level != SOL_TCP)
-               return inet_csk_compat_getsockopt(sk, level, optname,
-                                                 optval, optlen);
-       return do_tcp_getsockopt(sk, level, optname, optval, optlen);
-}
-
-EXPORT_SYMBOL(compat_tcp_getsockopt);
-#endif
-
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
-{
-       struct sk_buff *segs = ERR_PTR(-EINVAL);
-       struct tcphdr *th;
-       unsigned thlen;
-       unsigned int seq;
-       unsigned int delta;
-       unsigned int oldlen;
-       unsigned int len;
-
-       if (!pskb_may_pull(skb, sizeof(*th)))
-               goto out;
-
-       th = skb->h.th;
-       thlen = th->doff * 4;
-       if (thlen < sizeof(*th))
-               goto out;
-
-       if (!pskb_may_pull(skb, thlen))
-               goto out;
-
-       oldlen = (u16)~skb->len;
-       __skb_pull(skb, thlen);
-
-       if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
-               /* Packet is from an untrusted source, reset gso_segs. */
-               int mss = skb_shinfo(skb)->gso_size;
-
-               skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
-
-               segs = NULL;
-               goto out;
-       }
-
-       segs = skb_segment(skb, features);
-       if (IS_ERR(segs))
-               goto out;
-
-       len = skb_shinfo(skb)->gso_size;
-       delta = htonl(oldlen + (thlen + len));
-
-       skb = segs;
-       th = skb->h.th;
-       seq = ntohl(th->seq);
-
-       do {
-               th->fin = th->psh = 0;
-
-               th->check = ~csum_fold(th->check + delta);
-               if (skb->ip_summed != CHECKSUM_HW)
-                       th->check = csum_fold(csum_partial(skb->h.raw, thlen,
-                                                          skb->csum));
-
-               seq += len;
-               skb = skb->next;
-               th = skb->h.th;
-
-               th->seq = htonl(seq);
-               th->cwr = 0;
-       } while (skb->next);
-
-       delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
-       th->check = ~csum_fold(th->check + delta);
-       if (skb->ip_summed != CHECKSUM_HW)
-               th->check = csum_fold(csum_partial(skb->h.raw, thlen,
-                                                  skb->csum));
-
-out:
-       return segs;
-}
-
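The per-segment fix-up '~csum_fold(th->check + delta)' is the standard
incremental checksum update (RFC 1624): fold the 32-bit accumulator to
16 bits and complement. A stand-alone model with hypothetical helper
names mirroring csum_fold():

#include <stdint.h>

/* Fold a 32-bit one's-complement accumulator to 16 bits and invert,
 * as csum_fold() does: add the carries back in twice, then ~. */
static uint16_t fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

/* Patch a stored check value for a length change without re-summing
 * the payload, the same shape as the 'delta' updates above. */
static uint16_t patch_check(uint16_t check, uint32_t delta)
{
        return (uint16_t)~fold(check + delta);
}
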
-extern void __skb_cb_too_small_for_tcp(int, int);
-extern struct tcp_congestion_ops tcp_reno;
-
-static __initdata unsigned long thash_entries;
-static int __init set_thash_entries(char *str)
-{
-       if (!str)
-               return 0;
-       thash_entries = simple_strtoul(str, &str, 0);
-       return 1;
-}
-__setup("thash_entries=", set_thash_entries);
-
-void __init tcp_init(void)
-{
-       struct sk_buff *skb = NULL;
-       unsigned long limit;
-       int order, i, max_share;
-
-       if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
-               __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
-                                          sizeof(skb->cb));
-
-       tcp_hashinfo.bind_bucket_cachep =
-               kmem_cache_create("tcp_bind_bucket",
-                                 sizeof(struct inet_bind_bucket), 0,
-                                 SLAB_HWCACHE_ALIGN, NULL, NULL);
-       if (!tcp_hashinfo.bind_bucket_cachep)
-               panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
-
-       /* Size and allocate the main established and bind bucket
-        * hash tables.
-        *
-        * The methodology is similar to that of the buffer cache.
-        */
-       tcp_hashinfo.ehash =
-               alloc_large_system_hash("TCP established",
-                                       sizeof(struct inet_ehash_bucket),
-                                       thash_entries,
-                                       (num_physpages >= 128 * 1024) ?
-                                       13 : 15,
-                                       HASH_HIGHMEM,
-                                       &tcp_hashinfo.ehash_size,
-                                       NULL,
-                                       0);
-       tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
-       for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
-               rwlock_init(&tcp_hashinfo.ehash[i].lock);
-               INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
-       }
-
-       tcp_hashinfo.bhash =
-               alloc_large_system_hash("TCP bind",
-                                       sizeof(struct inet_bind_hashbucket),
-                                       tcp_hashinfo.ehash_size,
-                                       (num_physpages >= 128 * 1024) ?
-                                       13 : 15,
-                                       HASH_HIGHMEM,
-                                       &tcp_hashinfo.bhash_size,
-                                       NULL,
-                                       64 * 1024);
-       tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
-       for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
-               spin_lock_init(&tcp_hashinfo.bhash[i].lock);
-               INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
-       }
-
-       /* Try to be a bit smarter and adjust defaults depending
-        * on available memory.
-        */
-       for (order = 0; ((1 << order) << PAGE_SHIFT) <
-                       (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
-                       order++)
-               ;
-       if (order >= 4) {
-               sysctl_local_port_range[0] = 32768;
-               sysctl_local_port_range[1] = 61000;
-               tcp_death_row.sysctl_max_tw_buckets = 180000;
-               sysctl_tcp_max_orphans = 4096 << (order - 4);
-               sysctl_max_syn_backlog = 1024;
-       } else if (order < 3) {
-               sysctl_local_port_range[0] = 1024 * (3 - order);
-               tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
-               sysctl_tcp_max_orphans >>= (3 - order);
-               sysctl_max_syn_backlog = 128;
-       }
-
-       sysctl_tcp_mem[0] =  768 << order;
-       sysctl_tcp_mem[1] = 1024 << order;
-       sysctl_tcp_mem[2] = 1536 << order;
-
-       limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
-       max_share = min(4UL*1024*1024, limit);
-
-       sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
-       sysctl_tcp_wmem[1] = 16*1024;
-       sysctl_tcp_wmem[2] = max(64*1024, max_share);
-
-       sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
-       sysctl_tcp_rmem[1] = 87380;
-       sysctl_tcp_rmem[2] = max(87380, max_share);
-
-       printk(KERN_INFO "TCP: Hash tables configured "
-              "(established %d bind %d)\n",
-              tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
-
-       tcp_register_congestion_control(&tcp_reno);
-}
-
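The sizing loop in tcp_init() just computes the page order covering the
bind-bucket hash; a stand-alone equivalent (helper name invented,
PAGE_SHIFT assumed to be 12 for the example):

#include <stdio.h>

static int pages_order(unsigned long bytes, int page_shift)
{
        int order = 0;

        /* smallest order with (1 << order) pages covering 'bytes' */
        while (((1UL << order) << page_shift) < bytes)
                order++;
        return order;
}

int main(void)
{
        /* e.g. 65536 buckets of 16 bytes = 1 MiB -> order 8 */
        printf("order=%d\n", pages_order(65536UL * 16, 12));
        return 0;
}
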
-EXPORT_SYMBOL(tcp_close);
-EXPORT_SYMBOL(tcp_disconnect);
-EXPORT_SYMBOL(tcp_getsockopt);
-EXPORT_SYMBOL(tcp_ioctl);
-EXPORT_SYMBOL(tcp_poll);
-EXPORT_SYMBOL(tcp_read_sock);
-EXPORT_SYMBOL(tcp_recvmsg);
-EXPORT_SYMBOL(tcp_sendmsg);
-EXPORT_SYMBOL(tcp_sendpage);
-EXPORT_SYMBOL(tcp_setsockopt);
-EXPORT_SYMBOL(tcp_shutdown);
-EXPORT_SYMBOL(tcp_statistics);
-EXPORT_SYMBOL_GPL(cleanup_rbuf);