+diff -Nurb linux-2.6.27-720/include/linux/fs.h.orig linux-2.6.27-710/include/linux/fs.h.orig
+--- linux-2.6.27-720/include/linux/fs.h.orig 2009-05-04 12:16:10.000000000 -0400
++++ linux-2.6.27-710/include/linux/fs.h.orig 1969-12-31 19:00:00.000000000 -0500
+@@ -1,2199 +0,0 @@
+-#ifndef _LINUX_FS_H
+-#define _LINUX_FS_H
+-
+-/*
+- * This file has definitions for some important file table
+- * structures etc.
+- */
+-
+-#include <linux/limits.h>
+-#include <linux/ioctl.h>
+-
+-/*
+- * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
+- * the file limit at runtime and only root can increase the per-process
+- * nr_file rlimit, so it's safe to set up a ridiculously high absolute
+- * upper limit on files-per-process.
+- *
+- * Some programs (notably those using select()) may have to be
+- * recompiled to take full advantage of the new limits..
+- */
+-
+-/* Fixed constants first: */
+-#undef NR_OPEN
+-extern int sysctl_nr_open;
+-#define INR_OPEN 4096 /* Initial setting for nfile rlimits */
+-
+-#define BLOCK_SIZE_BITS 10
+-#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)
+-
+-#define SEEK_SET 0 /* seek relative to beginning of file */
+-#define SEEK_CUR 1 /* seek relative to current file position */
+-#define SEEK_END 2 /* seek relative to end of file */
+-#define SEEK_MAX SEEK_END
+-
+-/* And dynamically-tunable limits and defaults: */
+-struct files_stat_struct {
+- int nr_files; /* read only */
+- int nr_free_files; /* read only */
+- int max_files; /* tunable */
+-};
+-extern struct files_stat_struct files_stat;
+-extern int get_max_files(void);
+-
+-struct inodes_stat_t {
+- int nr_inodes;
+- int nr_unused;
+- int dummy[5]; /* padding for sysctl ABI compatibility */
+-};
+-extern struct inodes_stat_t inodes_stat;
+-
+-extern int leases_enable, lease_break_time;
+-
+-#ifdef CONFIG_DNOTIFY
+-extern int dir_notify_enable;
+-#endif
+-
+-#define NR_FILE 8192 /* this can well be larger on a larger system */
+-
+-#define MAY_EXEC 1
+-#define MAY_WRITE 2
+-#define MAY_READ 4
+-#define MAY_APPEND 8
+-#define MAY_ACCESS 16
+-#define MAY_OPEN 32
+-
+-#define FMODE_READ 1
+-#define FMODE_WRITE 2
+-
+-/* Internal kernel extensions */
+-#define FMODE_LSEEK 4
+-#define FMODE_PREAD 8
+-#define FMODE_PWRITE FMODE_PREAD /* These go hand in hand */
+-
+-/* File is being opened for execution. Primary users of this flag are
+- distributed filesystems that can use it to achieve correct ETXTBUSY
+- behavior for cross-node execution/opening_for_writing of files */
+-#define FMODE_EXEC 16
+-
+-#define RW_MASK 1
+-#define RWA_MASK 2
+-#define READ 0
+-#define WRITE 1
+-#define READA 2 /* read-ahead - don't block if no resources */
+-#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */
+-#define READ_SYNC (READ | (1 << BIO_RW_SYNC))
+-#define READ_META (READ | (1 << BIO_RW_META))
+-#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNC))
+-#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNC))
+-#define WRITE_BARRIER ((1 << BIO_RW) | (1 << BIO_RW_BARRIER))
+-
+-#define SEL_IN 1
+-#define SEL_OUT 2
+-#define SEL_EX 4
+-
+-/* public flags for file_system_type */
+-#define FS_REQUIRES_DEV 1
+-#define FS_BINARY_MOUNTDATA 2
+-#define FS_HAS_SUBTYPE 4
+-#define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */
+-#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move()
+- * during rename() internally.
+- */
+-
+-/*
+- * These are the fs-independent mount-flags: up to 32 flags are supported
+- */
+-#define MS_RDONLY 1 /* Mount read-only */
+-#define MS_NOSUID 2 /* Ignore suid and sgid bits */
+-#define MS_NODEV 4 /* Disallow access to device special files */
+-#define MS_NOEXEC 8 /* Disallow program execution */
+-#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
+-#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
+-#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
+-#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
+-#define MS_NOATIME 1024 /* Do not update access times. */
+-#define MS_NODIRATIME 2048 /* Do not update directory access times */
+-#define MS_BIND 4096
+-#define MS_MOVE 8192
+-#define MS_REC 16384
+-#define MS_VERBOSE 32768 /* War is peace. Verbosity is silence.
+- MS_VERBOSE is deprecated. */
+-#define MS_SILENT 32768
+-#define MS_POSIXACL (1<<16) /* VFS does not apply the umask */
+-#define MS_UNBINDABLE (1<<17) /* change to unbindable */
+-#define MS_PRIVATE (1<<18) /* change to private */
+-#define MS_SLAVE (1<<19) /* change to slave */
+-#define MS_SHARED (1<<20) /* change to shared */
+-#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
+-#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
+-#define MS_I_VERSION (1<<23) /* Update inode I_version field */
+-#define MS_TAGGED (1<<24) /* use generic inode tagging */
+-#define MS_TAGID (1<<25) /* use specific tag for this mount */
+-#define MS_NOTAGCHECK (1<<26) /* don't check tags */
+-#define MS_ACTIVE (1<<30)
+-#define MS_NOUSER (1<<31)
+-
+-/*
+- * Superblock flags that can be altered by MS_REMOUNT
+- */
+-#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK)
+-
+-/*
+- * Old magic mount flag and mask
+- */
+-#define MS_MGC_VAL 0xC0ED0000
+-#define MS_MGC_MSK 0xffff0000
+-
+-/* Inode flags - they have nothing to superblock flags now */
+-
+-#define S_SYNC 1 /* Writes are synced at once */
+-#define S_NOATIME 2 /* Do not update access times */
+-#define S_APPEND 4 /* Append-only file */
+-#define S_IMMUTABLE 8 /* Immutable file */
+-#define S_DEAD 16 /* removed, but still open directory */
+-#define S_NOQUOTA 32 /* Inode is not counted to quota */
+-#define S_DIRSYNC 64 /* Directory modifications are synchronous */
+-#define S_NOCMTIME 128 /* Do not update file c/mtime */
+-#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */
+-#define S_PRIVATE 512 /* Inode is fs-internal */
+-#define S_IXUNLINK 1024 /* Immutable Invert on unlink */
+-
+-/* Linux-VServer related Inode flags */
+-
+-#define V_VALID 1
+-#define V_XATTR 2
+-#define V_BARRIER 4 /* Barrier for chroot() */
+-#define V_COW 8 /* Copy on Write */
+-
+-/*
+- * Note that nosuid etc flags are inode-specific: setting some file-system
+- * flags just means all the inodes inherit those flags by default. It might be
+- * possible to override it selectively if you really wanted to with some
+- * ioctl() that is not currently implemented.
+- *
+- * Exception: MS_RDONLY is always applied to the entire file system.
+- *
+- * Unfortunately, it is possible to change a filesystems flags with it mounted
+- * with files in use. This means that all of the inodes will not have their
+- * i_flags updated. Hence, i_flags no longer inherit the superblock mount
+- * flags, so these have to be checked separately. -- rmk@arm.uk.linux.org
+- */
+-#define __IS_FLG(inode,flg) ((inode)->i_sb->s_flags & (flg))
+-
+-#define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY)
+-#define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \
+- ((inode)->i_flags & S_SYNC))
+-#define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
+- ((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
+-#define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK)
+-#define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
+-#define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION)
+-#define IS_TAGGED(inode) __IS_FLG(inode, MS_TAGGED)
+-
+-#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
+-#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
+-#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+-#define IS_IXUNLINK(inode) ((inode)->i_flags & S_IXUNLINK)
+-#define IS_IXORUNLINK(inode) ((IS_IXUNLINK(inode) ? S_IMMUTABLE : 0) ^ IS_IMMUTABLE(inode))
+-#define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL)
+-
+-#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
+-#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
+-#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
+-#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
+-
+-#define IS_BARRIER(inode) (S_ISDIR((inode)->i_mode) && ((inode)->i_vflags & V_BARRIER))
+-
+-#ifdef CONFIG_VSERVER_COWBL
+-# define IS_COW(inode) (IS_IXUNLINK(inode) && IS_IMMUTABLE(inode))
+-# define IS_COW_LINK(inode) (S_ISREG((inode)->i_mode) && ((inode)->i_nlink > 1))
+-#else
+-# define IS_COW(inode) (0)
+-# define IS_COW_LINK(inode) (0)
+-#endif
+-
+-/* the read-only stuff doesn't really belong here, but any other place is
+- probably as bad and I don't want to create yet another include file. */
+-
+-#define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */
+-#define BLKROGET _IO(0x12,94) /* get read-only status (0 = read_write) */
+-#define BLKRRPART _IO(0x12,95) /* re-read partition table */
+-#define BLKGETSIZE _IO(0x12,96) /* return device size /512 (long *arg) */
+-#define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */
+-#define BLKRASET _IO(0x12,98) /* set read ahead for block device */
+-#define BLKRAGET _IO(0x12,99) /* get current read ahead setting */
+-#define BLKFRASET _IO(0x12,100)/* set filesystem (mm/filemap.c) read-ahead */
+-#define BLKFRAGET _IO(0x12,101)/* get filesystem (mm/filemap.c) read-ahead */
+-#define BLKSECTSET _IO(0x12,102)/* set max sectors per request (ll_rw_blk.c) */
+-#define BLKSECTGET _IO(0x12,103)/* get max sectors per request (ll_rw_blk.c) */
+-#define BLKSSZGET _IO(0x12,104)/* get block device sector size */
+-#if 0
+-#define BLKPG _IO(0x12,105)/* See blkpg.h */
+-
+-/* Some people are morons. Do not use sizeof! */
+-
+-#define BLKELVGET _IOR(0x12,106,size_t)/* elevator get */
+-#define BLKELVSET _IOW(0x12,107,size_t)/* elevator set */
+-/* This was here just to show that the number is taken -
+- probably all these _IO(0x12,*) ioctls should be moved to blkpg.h. */
+-#endif
+-/* A jump here: 108-111 have been used for various private purposes. */
+-#define BLKBSZGET _IOR(0x12,112,size_t)
+-#define BLKBSZSET _IOW(0x12,113,size_t)
+-#define BLKGETSIZE64 _IOR(0x12,114,size_t) /* return device size in bytes (u64 *arg) */
+-#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup)
+-#define BLKTRACESTART _IO(0x12,116)
+-#define BLKTRACESTOP _IO(0x12,117)
+-#define BLKTRACETEARDOWN _IO(0x12,118)
+-
+-#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
+-#define FIBMAP _IO(0x00,1) /* bmap access */
+-#define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */
+-
+-#define FS_IOC_GETFLAGS _IOR('f', 1, long)
+-#define FS_IOC_SETFLAGS _IOW('f', 2, long)
+-#define FS_IOC_GETVERSION _IOR('v', 1, long)
+-#define FS_IOC_SETVERSION _IOW('v', 2, long)
+-#define FS_IOC32_GETFLAGS _IOR('f', 1, int)
+-#define FS_IOC32_SETFLAGS _IOW('f', 2, int)
+-#define FS_IOC32_GETVERSION _IOR('v', 1, int)
+-#define FS_IOC32_SETVERSION _IOW('v', 2, int)
+-
+-/*
+- * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
+- */
+-#define FS_SECRM_FL 0x00000001 /* Secure deletion */
+-#define FS_UNRM_FL 0x00000002 /* Undelete */
+-#define FS_COMPR_FL 0x00000004 /* Compress file */
+-#define FS_SYNC_FL 0x00000008 /* Synchronous updates */
+-#define FS_IMMUTABLE_FL 0x00000010 /* Immutable file */
+-#define FS_APPEND_FL 0x00000020 /* writes to file may only append */
+-#define FS_NODUMP_FL 0x00000040 /* do not dump file */
+-#define FS_NOATIME_FL 0x00000080 /* do not update atime */
+-/* Reserved for compression usage... */
+-#define FS_DIRTY_FL 0x00000100
+-#define FS_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */
+-#define FS_NOCOMP_FL 0x00000400 /* Don't compress */
+-#define FS_ECOMPR_FL 0x00000800 /* Compression error */
+-/* End compression flags --- maybe not all used */
+-#define FS_BTREE_FL 0x00001000 /* btree format dir */
+-#define FS_INDEX_FL 0x00001000 /* hash-indexed directory */
+-#define FS_IMAGIC_FL 0x00002000 /* AFS directory */
+-#define FS_JOURNAL_DATA_FL 0x00004000 /* Reserved for ext3 */
+-#define FS_NOTAIL_FL 0x00008000 /* file tail should not be merged */
+-#define FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
+-#define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
+-#define FS_EXTENT_FL 0x00080000 /* Extents */
+-#define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */
+-#define FS_IXUNLINK_FL 0x08000000 /* Immutable invert on unlink */
+-#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */
+-
+-#define FS_BARRIER_FL 0x04000000 /* Barrier for chroot() */
+-#define FS_COW_FL 0x20000000 /* Copy on Write marker */
+-
+-#define FS_FL_USER_VISIBLE 0x0103DFFF /* User visible flags */
+-#define FS_FL_USER_MODIFIABLE 0x010380FF /* User modifiable flags */
+-
+-#define SYNC_FILE_RANGE_WAIT_BEFORE 1
+-#define SYNC_FILE_RANGE_WRITE 2
+-#define SYNC_FILE_RANGE_WAIT_AFTER 4
+-
+-#ifdef __KERNEL__
+-
+-#include <linux/linkage.h>
+-#include <linux/wait.h>
+-#include <linux/types.h>
+-#include <linux/kdev_t.h>
+-#include <linux/dcache.h>
+-#include <linux/path.h>
+-#include <linux/stat.h>
+-#include <linux/cache.h>
+-#include <linux/kobject.h>
+-#include <linux/list.h>
+-#include <linux/radix-tree.h>
+-#include <linux/prio_tree.h>
+-#include <linux/init.h>
+-#include <linux/pid.h>
+-#include <linux/mutex.h>
+-#include <linux/capability.h>
+-#include <linux/semaphore.h>
+-
+-#include <asm/atomic.h>
+-#include <asm/byteorder.h>
+-
+-struct export_operations;
+-struct hd_geometry;
+-struct iovec;
+-struct nameidata;
+-struct kiocb;
+-struct pipe_inode_info;
+-struct poll_table_struct;
+-struct kstatfs;
+-struct vm_area_struct;
+-struct vfsmount;
+-
+-extern void __init inode_init(void);
+-extern void __init inode_init_early(void);
+-extern void __init files_init(unsigned long);
+-
+-struct buffer_head;
+-typedef int (get_block_t)(struct inode *inode, sector_t iblock,
+- struct buffer_head *bh_result, int create);
+-typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
+- ssize_t bytes, void *private);
+-
+-/*
+- * Attribute flags. These should be or-ed together to figure out what
+- * has been changed!
+- */
+-#define ATTR_MODE (1 << 0)
+-#define ATTR_UID (1 << 1)
+-#define ATTR_GID (1 << 2)
+-#define ATTR_SIZE (1 << 3)
+-#define ATTR_ATIME (1 << 4)
+-#define ATTR_MTIME (1 << 5)
+-#define ATTR_CTIME (1 << 6)
+-#define ATTR_ATIME_SET (1 << 7)
+-#define ATTR_MTIME_SET (1 << 8)
+-#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */
+-#define ATTR_ATTR_FLAG (1 << 10)
+-#define ATTR_KILL_SUID (1 << 11)
+-#define ATTR_KILL_SGID (1 << 12)
+-#define ATTR_FILE (1 << 13)
+-#define ATTR_KILL_PRIV (1 << 14)
+-#define ATTR_OPEN (1 << 15) /* Truncating from open(O_TRUNC) */
+-#define ATTR_TIMES_SET (1 << 16)
+-#define ATTR_TAG (1 << 17)
+-
+-/*
+- * This is the Inode Attributes structure, used for notify_change(). It
+- * uses the above definitions as flags, to know which values have changed.
+- * Also, in this manner, a Filesystem can look at only the values it cares
+- * about. Basically, these are the attributes that the VFS layer can
+- * request to change from the FS layer.
+- *
+- * Derek Atkins <warlord@MIT.EDU> 94-10-20
+- */
+-struct iattr {
+- unsigned int ia_valid;
+- umode_t ia_mode;
+- uid_t ia_uid;
+- gid_t ia_gid;
+- tag_t ia_tag;
+- loff_t ia_size;
+- struct timespec ia_atime;
+- struct timespec ia_mtime;
+- struct timespec ia_ctime;
+-
+- /*
+- * Not an attribute, but an auxilary info for filesystems wanting to
+- * implement an ftruncate() like method. NOTE: filesystem should
+- * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
+- */
+- struct file *ia_file;
+-};
+-
+-#define ATTR_FLAG_BARRIER 512 /* Barrier for chroot() */
+-#define ATTR_FLAG_IXUNLINK 1024 /* Immutable invert on unlink */
+-
+-/*
+- * Includes for diskquotas.
+- */
+-#include <linux/quota.h>
+-
+-/**
+- * enum positive_aop_returns - aop return codes with specific semantics
+- *
+- * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
+- * completed, that the page is still locked, and
+- * should be considered active. The VM uses this hint
+- * to return the page to the active list -- it won't
+- * be a candidate for writeback again in the near
+- * future. Other callers must be careful to unlock
+- * the page if they get this return. Returned by
+- * writepage();
+- *
+- * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
+- * unlocked it and the page might have been truncated.
+- * The caller should back up to acquiring a new page and
+- * trying again. The aop will be taking reasonable
+- * precautions not to livelock. If the caller held a page
+- * reference, it should drop it before retrying. Returned
+- * by readpage().
+- *
+- * address_space_operation functions return these large constants to indicate
+- * special semantics to the caller. These are much larger than the bytes in a
+- * page to allow for functions that return the number of bytes operated on in a
+- * given page.
+- */
+-
+-enum positive_aop_returns {
+- AOP_WRITEPAGE_ACTIVATE = 0x80000,
+- AOP_TRUNCATED_PAGE = 0x80001,
+-};
+-
+-#define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */
+-#define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */
+-#define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct
+- * helper code (eg buffer layer)
+- * to clear GFP_FS from alloc */
+-
+-/*
+- * oh the beauties of C type declarations.
+- */
+-struct page;
+-struct address_space;
+-struct writeback_control;
+-
+-struct iov_iter {
+- const struct iovec *iov;
+- unsigned long nr_segs;
+- size_t iov_offset;
+- size_t count;
+-};
+-
+-size_t iov_iter_copy_from_user_atomic(struct page *page,
+- struct iov_iter *i, unsigned long offset, size_t bytes);
+-size_t iov_iter_copy_from_user(struct page *page,
+- struct iov_iter *i, unsigned long offset, size_t bytes);
+-void iov_iter_advance(struct iov_iter *i, size_t bytes);
+-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+-size_t iov_iter_single_seg_count(struct iov_iter *i);
+-
+-static inline void iov_iter_init(struct iov_iter *i,
+- const struct iovec *iov, unsigned long nr_segs,
+- size_t count, size_t written)
+-{
+- i->iov = iov;
+- i->nr_segs = nr_segs;
+- i->iov_offset = 0;
+- i->count = count + written;
+-
+- iov_iter_advance(i, written);
+-}
+-
+-static inline size_t iov_iter_count(struct iov_iter *i)
+-{
+- return i->count;
+-}
+-
+-/*
+- * "descriptor" for what we're up to with a read.
+- * This allows us to use the same read code yet
+- * have multiple different users of the data that
+- * we read from a file.
+- *
+- * The simplest case just copies the data to user
+- * mode.
+- */
+-typedef struct {
+- size_t written;
+- size_t count;
+- union {
+- char __user *buf;
+- void *data;
+- } arg;
+- int error;
+-} read_descriptor_t;
+-
+-typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
+- unsigned long, unsigned long);
+-
+-struct address_space_operations {
+- int (*writepage)(struct page *page, struct writeback_control *wbc);
+- int (*readpage)(struct file *, struct page *);
+- void (*sync_page)(struct page *);
+-
+- /* Write back some dirty pages from this mapping. */
+- int (*writepages)(struct address_space *, struct writeback_control *);
+-
+- /* Set a page dirty. Return true if this dirtied it */
+- int (*set_page_dirty)(struct page *page);
+-
+- int (*readpages)(struct file *filp, struct address_space *mapping,
+- struct list_head *pages, unsigned nr_pages);
+-
+- /*
+- * ext3 requires that a successful prepare_write() call be followed
+- * by a commit_write() call - they must be balanced
+- */
+- int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
+- int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
+-
+- int (*write_begin)(struct file *, struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned flags,
+- struct page **pagep, void **fsdata);
+- int (*write_end)(struct file *, struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned copied,
+- struct page *page, void *fsdata);
+-
+- /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
+- sector_t (*bmap)(struct address_space *, sector_t);
+- void (*invalidatepage) (struct page *, unsigned long);
+- int (*releasepage) (struct page *, gfp_t);
+- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
+- loff_t offset, unsigned long nr_segs);
+- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
+- void **, unsigned long *);
+- /* migrate the contents of a page to the specified target */
+- int (*migratepage) (struct address_space *,
+- struct page *, struct page *);
+- int (*launder_page) (struct page *);
+- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
+- unsigned long);
+-};
+-
+-/*
+- * pagecache_write_begin/pagecache_write_end must be used by general code
+- * to write into the pagecache.
+- */
+-int pagecache_write_begin(struct file *, struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned flags,
+- struct page **pagep, void **fsdata);
+-
+-int pagecache_write_end(struct file *, struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned copied,
+- struct page *page, void *fsdata);
+-
+-struct backing_dev_info;
+-struct address_space {
+- struct inode *host; /* owner: inode, block_device */
+- struct radix_tree_root page_tree; /* radix tree of all pages */
+- spinlock_t tree_lock; /* and lock protecting it */
+- unsigned int i_mmap_writable;/* count VM_SHARED mappings */
+- struct prio_tree_root i_mmap; /* tree of private and shared mappings */
+- struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
+- spinlock_t i_mmap_lock; /* protect tree, count, list */
+- unsigned int truncate_count; /* Cover race condition with truncate */
+- unsigned long nrpages; /* number of total pages */
+- pgoff_t writeback_index;/* writeback starts here */
+- const struct address_space_operations *a_ops; /* methods */
+- unsigned long flags; /* error bits/gfp mask */
+- struct backing_dev_info *backing_dev_info; /* device readahead, etc */
+- spinlock_t private_lock; /* for use by the address_space */
+- struct list_head private_list; /* ditto */
+- struct address_space *assoc_mapping; /* ditto */
+-} __attribute__((aligned(sizeof(long))));
+- /*
+- * On most architectures that alignment is already the case; but
+- * must be enforced here for CRIS, to let the least signficant bit
+- * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
+- */
+-
+-struct block_device {
+- dev_t bd_dev; /* not a kdev_t - it's a search key */
+- struct inode * bd_inode; /* will die */
+- int bd_openers;
+- struct mutex bd_mutex; /* open/close mutex */
+- struct semaphore bd_mount_sem;
+- struct list_head bd_inodes;
+- void * bd_holder;
+- int bd_holders;
+-#ifdef CONFIG_SYSFS
+- struct list_head bd_holder_list;
+-#endif
+- struct block_device * bd_contains;
+- unsigned bd_block_size;
+- struct hd_struct * bd_part;
+- /* number of times partitions within this device have been opened. */
+- unsigned bd_part_count;
+- int bd_invalidated;
+- struct gendisk * bd_disk;
+- struct list_head bd_list;
+- struct backing_dev_info *bd_inode_backing_dev_info;
+- /*
+- * Private data. You must have bd_claim'ed the block_device
+- * to use this. NOTE: bd_claim allows an owner to claim
+- * the same device multiple times, the owner must take special
+- * care to not mess up bd_private for that case.
+- */
+- unsigned long bd_private;
+-};
+-
+-/*
+- * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
+- * radix trees
+- */
+-#define PAGECACHE_TAG_DIRTY 0
+-#define PAGECACHE_TAG_WRITEBACK 1
+-
+-int mapping_tagged(struct address_space *mapping, int tag);
+-
+-/*
+- * Might pages of this file be mapped into userspace?
+- */
+-static inline int mapping_mapped(struct address_space *mapping)
+-{
+- return !prio_tree_empty(&mapping->i_mmap) ||
+- !list_empty(&mapping->i_mmap_nonlinear);
+-}
+-
+-/*
+- * Might pages of this file have been modified in userspace?
+- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
+- * marks vma as VM_SHARED if it is shared, and the file was opened for
+- * writing i.e. vma may be mprotected writable even if now readonly.
+- */
+-static inline int mapping_writably_mapped(struct address_space *mapping)
+-{
+- return mapping->i_mmap_writable != 0;
+-}
+-
+-/*
+- * Use sequence counter to get consistent i_size on 32-bit processors.
+- */
+-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+-#include <linux/seqlock.h>
+-#define __NEED_I_SIZE_ORDERED
+-#define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount)
+-#else
+-#define i_size_ordered_init(inode) do { } while (0)
+-#endif
+-
+-struct inode {
+- struct hlist_node i_hash;
+- struct list_head i_list;
+- struct list_head i_sb_list;
+- struct list_head i_dentry;
+- unsigned long i_ino;
+- atomic_t i_count;
+- unsigned int i_nlink;
+- uid_t i_uid;
+- gid_t i_gid;
+- tag_t i_tag;
+- dev_t i_rdev;
+- dev_t i_mdev;
+- u64 i_version;
+- loff_t i_size;
+-#ifdef __NEED_I_SIZE_ORDERED
+- seqcount_t i_size_seqcount;
+-#endif
+- struct timespec i_atime;
+- struct timespec i_mtime;
+- struct timespec i_ctime;
+- unsigned int i_blkbits;
+- blkcnt_t i_blocks;
+- unsigned short i_bytes;
+- umode_t i_mode;
+- spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
+- struct mutex i_mutex;
+- struct rw_semaphore i_alloc_sem;
+- const struct inode_operations *i_op;
+- const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
+- struct super_block *i_sb;
+- struct file_lock *i_flock;
+- struct address_space *i_mapping;
+- struct address_space i_data;
+-#ifdef CONFIG_QUOTA
+- struct dquot *i_dquot[MAXQUOTAS];
+-#endif
+- struct list_head i_devices;
+- union {
+- struct pipe_inode_info *i_pipe;
+- struct block_device *i_bdev;
+- struct cdev *i_cdev;
+- };
+- int i_cindex;
+-
+- __u32 i_generation;
+-
+-#ifdef CONFIG_DNOTIFY
+- unsigned long i_dnotify_mask; /* Directory notify events */
+- struct dnotify_struct *i_dnotify; /* for directory notifications */
+-#endif
+-
+-#ifdef CONFIG_INOTIFY
+- struct list_head inotify_watches; /* watches on this inode */
+- struct mutex inotify_mutex; /* protects the watches list */
+-#endif
+-
+- unsigned long i_state;
+- unsigned long dirtied_when; /* jiffies of first dirtying */
+-
+- unsigned short i_flags;
+- unsigned short i_vflags;
+-
+- atomic_t i_writecount;
+-#ifdef CONFIG_SECURITY
+- void *i_security;
+-#endif
+- void *i_private; /* fs or device private pointer */
+-};
+-
+-/*
+- * inode->i_mutex nesting subclasses for the lock validator:
+- *
+- * 0: the object of the current VFS operation
+- * 1: parent
+- * 2: child/target
+- * 3: quota file
+- *
+- * The locking order between these classes is
+- * parent -> child -> normal -> xattr -> quota
+- */
+-enum inode_i_mutex_lock_class
+-{
+- I_MUTEX_NORMAL,
+- I_MUTEX_PARENT,
+- I_MUTEX_CHILD,
+- I_MUTEX_XATTR,
+- I_MUTEX_QUOTA
+-};
+-
+-extern void inode_double_lock(struct inode *inode1, struct inode *inode2);
+-extern void inode_double_unlock(struct inode *inode1, struct inode *inode2);
+-
+-/*
+- * NOTE: in a 32bit arch with a preemptable kernel and
+- * an UP compile the i_size_read/write must be atomic
+- * with respect to the local cpu (unlike with preempt disabled),
+- * but they don't need to be atomic with respect to other cpus like in
+- * true SMP (so they need either to either locally disable irq around
+- * the read or for example on x86 they can be still implemented as a
+- * cmpxchg8b without the need of the lock prefix). For SMP compiles
+- * and 64bit archs it makes no difference if preempt is enabled or not.
+- */
+-static inline loff_t i_size_read(const struct inode *inode)
+-{
+-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+- loff_t i_size;
+- unsigned int seq;
+-
+- do {
+- seq = read_seqcount_begin(&inode->i_size_seqcount);
+- i_size = inode->i_size;
+- } while (read_seqcount_retry(&inode->i_size_seqcount, seq));
+- return i_size;
+-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+- loff_t i_size;
+-
+- preempt_disable();
+- i_size = inode->i_size;
+- preempt_enable();
+- return i_size;
+-#else
+- return inode->i_size;
+-#endif
+-}
+-
+-/*
+- * NOTE: unlike i_size_read(), i_size_write() does need locking around it
+- * (normally i_mutex), otherwise on 32bit/SMP an update of i_size_seqcount
+- * can be lost, resulting in subsequent i_size_read() calls spinning forever.
+- */
+-static inline void i_size_write(struct inode *inode, loff_t i_size)
+-{
+-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+- write_seqcount_begin(&inode->i_size_seqcount);
+- inode->i_size = i_size;
+- write_seqcount_end(&inode->i_size_seqcount);
+-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+- preempt_disable();
+- inode->i_size = i_size;
+- preempt_enable();
+-#else
+- inode->i_size = i_size;
+-#endif
+-}
+-
+-static inline unsigned iminor(const struct inode *inode)
+-{
+- return MINOR(inode->i_mdev);
+-}
+-
+-static inline unsigned imajor(const struct inode *inode)
+-{
+- return MAJOR(inode->i_mdev);
+-}
+-
+-extern struct block_device *I_BDEV(struct inode *inode);
+-
+-struct fown_struct {
+- rwlock_t lock; /* protects pid, uid, euid fields */
+- struct pid *pid; /* pid or -pgrp where SIGIO should be sent */
+- enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */
+- uid_t uid, euid; /* uid/euid of process setting the owner */
+- int signum; /* posix.1b rt signal to be delivered on IO */
+-};
+-
+-/*
+- * Track a single file's readahead state
+- */
+-struct file_ra_state {
+- pgoff_t start; /* where readahead started */
+- unsigned int size; /* # of readahead pages */
+- unsigned int async_size; /* do asynchronous readahead when
+- there are only # of pages ahead */
+-
+- unsigned int ra_pages; /* Maximum readahead window */
+- int mmap_miss; /* Cache miss stat for mmap accesses */
+- loff_t prev_pos; /* Cache last read() position */
+-};
+-
+-/*
+- * Check if @index falls in the readahead windows.
+- */
+-static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
+-{
+- return (index >= ra->start &&
+- index < ra->start + ra->size);
+-}
+-
+-#define FILE_MNT_WRITE_TAKEN 1
+-#define FILE_MNT_WRITE_RELEASED 2
+-
+-struct file {
+- /*
+- * fu_list becomes invalid after file_free is called and queued via
+- * fu_rcuhead for RCU freeing
+- */
+- union {
+- struct list_head fu_list;
+- struct rcu_head fu_rcuhead;
+- } f_u;
+- struct path f_path;
+-#define f_dentry f_path.dentry
+-#define f_vfsmnt f_path.mnt
+- const struct file_operations *f_op;
+- atomic_long_t f_count;
+- unsigned int f_flags;
+- mode_t f_mode;
+- loff_t f_pos;
+- struct fown_struct f_owner;
+- unsigned int f_uid, f_gid;
+- xid_t f_xid;
+- struct file_ra_state f_ra;
+-
+- u64 f_version;
+-#ifdef CONFIG_SECURITY
+- void *f_security;
+-#endif
+- /* needed for tty driver, and maybe others */
+- void *private_data;
+-
+-#ifdef CONFIG_EPOLL
+- /* Used by fs/eventpoll.c to link all the hooks to this file */
+- struct list_head f_ep_links;
+- spinlock_t f_ep_lock;
+-#endif /* #ifdef CONFIG_EPOLL */
+- struct address_space *f_mapping;
+-#ifdef CONFIG_DEBUG_WRITECOUNT
+- unsigned long f_mnt_write_state;
+-#endif
+-};
+-extern spinlock_t files_lock;
+-#define file_list_lock() spin_lock(&files_lock);
+-#define file_list_unlock() spin_unlock(&files_lock);
+-
+-#define get_file(x) atomic_long_inc(&(x)->f_count)
+-#define file_count(x) atomic_long_read(&(x)->f_count)
+-
+-#ifdef CONFIG_DEBUG_WRITECOUNT
+-static inline void file_take_write(struct file *f)
+-{
+- WARN_ON(f->f_mnt_write_state != 0);
+- f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN;
+-}
+-static inline void file_release_write(struct file *f)
+-{
+- f->f_mnt_write_state |= FILE_MNT_WRITE_RELEASED;
+-}
+-static inline void file_reset_write(struct file *f)
+-{
+- f->f_mnt_write_state = 0;
+-}
+-static inline void file_check_state(struct file *f)
+-{
+- /*
+- * At this point, either both or neither of these bits
+- * should be set.
+- */
+- WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN);
+- WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_RELEASED);
+-}
+-static inline int file_check_writeable(struct file *f)
+-{
+- if (f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN)
+- return 0;
+- printk(KERN_WARNING "writeable file with no "
+- "mnt_want_write()\n");
+- WARN_ON(1);
+- return -EINVAL;
+-}
+-#else /* !CONFIG_DEBUG_WRITECOUNT */
+-static inline void file_take_write(struct file *filp) {}
+-static inline void file_release_write(struct file *filp) {}
+-static inline void file_reset_write(struct file *filp) {}
+-static inline void file_check_state(struct file *filp) {}
+-static inline int file_check_writeable(struct file *filp)
+-{
+- return 0;
+-}
+-#endif /* CONFIG_DEBUG_WRITECOUNT */
+-
+-#define MAX_NON_LFS ((1UL<<31) - 1)
+-
+-/* Page cache limit. The filesystems should put that into their s_maxbytes
+- limits, otherwise bad things can happen in VM. */
+-#if BITS_PER_LONG==32
+-#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+-#elif BITS_PER_LONG==64
+-#define MAX_LFS_FILESIZE 0x7fffffffffffffffUL
+-#endif
+-
+-#define FL_POSIX 1
+-#define FL_FLOCK 2
+-#define FL_ACCESS 8 /* not trying to lock, just looking */
+-#define FL_EXISTS 16 /* when unlocking, test for existence */
+-#define FL_LEASE 32 /* lease held on this file */
+-#define FL_CLOSE 64 /* unlock on close */
+-#define FL_SLEEP 128 /* A blocking lock */
+-
+-/*
+- * Special return value from posix_lock_file() and vfs_lock_file() for
+- * asynchronous locking.
+- */
+-#define FILE_LOCK_DEFERRED 1
+-
+-/*
+- * The POSIX file lock owner is determined by
+- * the "struct files_struct" in the thread group
+- * (or NULL for no owner - BSD locks).
+- *
+- * Lockd stuffs a "host" pointer into this.
+- */
+-typedef struct files_struct *fl_owner_t;
+-
+-struct file_lock_operations {
+- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+- void (*fl_release_private)(struct file_lock *);
+-};
+-
+-struct lock_manager_operations {
+- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
+- void (*fl_notify)(struct file_lock *); /* unblock callback */
+- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
+- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+- void (*fl_release_private)(struct file_lock *);
+- void (*fl_break)(struct file_lock *);
+- int (*fl_mylease)(struct file_lock *, struct file_lock *);
+- int (*fl_change)(struct file_lock **, int);
+-};
+-
+-/* that will die - we need it for nfs_lock_info */
+-#include <linux/nfs_fs_i.h>
+-
+-struct file_lock {
+- struct file_lock *fl_next; /* singly linked list for this inode */
+- struct list_head fl_link; /* doubly linked list of all locks */
+- struct list_head fl_block; /* circular list of blocked processes */
+- fl_owner_t fl_owner;
+- unsigned char fl_flags;
+- unsigned char fl_type;
+- unsigned int fl_pid;
+- struct pid *fl_nspid;
+- wait_queue_head_t fl_wait;
+- struct file *fl_file;
+- loff_t fl_start;
+- loff_t fl_end;
+- xid_t fl_xid;
+-
+- struct fasync_struct * fl_fasync; /* for lease break notifications */
+- unsigned long fl_break_time; /* for nonblocking lease breaks */
+-
+- struct file_lock_operations *fl_ops; /* Callbacks for filesystems */
+- struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */
+- union {
+- struct nfs_lock_info nfs_fl;
+- struct nfs4_lock_info nfs4_fl;
+- struct {
+- struct list_head link; /* link in AFS vnode's pending_locks list */
+- int state; /* state of grant or error if -ve */
+- } afs;
+- } fl_u;
+-};
+-
+-/* The following constant reflects the upper bound of the file/locking space */
+-#ifndef OFFSET_MAX
+-#define INT_LIMIT(x) (~((x)1 << (sizeof(x)*8 - 1)))
+-#define OFFSET_MAX INT_LIMIT(loff_t)
+-#define OFFT_OFFSET_MAX INT_LIMIT(off_t)
+-#endif
+-
+-#include <linux/fcntl.h>
+-
+-extern int fcntl_getlk(struct file *, struct flock __user *);
+-extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
+- struct flock __user *);
+-
+-#if BITS_PER_LONG == 32
+-extern int fcntl_getlk64(struct file *, struct flock64 __user *);
+-extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
+- struct flock64 __user *);
+-#endif
+-
+-extern void send_sigio(struct fown_struct *fown, int fd, int band);
+-extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
+-extern int fcntl_getlease(struct file *filp);
+-
+-/* fs/sync.c */
+-extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
+- loff_t endbyte, unsigned int flags);
+-
+-/* fs/locks.c */
+-extern void locks_init_lock(struct file_lock *);
+-extern void locks_copy_lock(struct file_lock *, struct file_lock *);
+-extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
+-extern void locks_remove_posix(struct file *, fl_owner_t);
+-extern void locks_remove_flock(struct file *);
+-extern void posix_test_lock(struct file *, struct file_lock *);
+-extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
+-extern int posix_lock_file_wait(struct file *, struct file_lock *);
+-extern int posix_unblock_lock(struct file *, struct file_lock *);
+-extern int vfs_test_lock(struct file *, struct file_lock *);
+-extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
+-extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
+-extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
+-extern int __break_lease(struct inode *inode, unsigned int flags);
+-extern void lease_get_mtime(struct inode *, struct timespec *time);
+-extern int generic_setlease(struct file *, long, struct file_lock **);
+-extern int vfs_setlease(struct file *, long, struct file_lock **);
+-extern int lease_modify(struct file_lock **, int);
+-extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
+-extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
+-extern struct seq_operations locks_seq_operations;
+-
+-struct fasync_struct {
+- int magic;
+- int fa_fd;
+- struct fasync_struct *fa_next; /* singly linked list */
+- struct file *fa_file;
+-};
+-
+-#define FASYNC_MAGIC 0x4601
+-
+-/* SMP safe fasync helpers: */
+-extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
+-/* can be called from interrupts */
+-extern void kill_fasync(struct fasync_struct **, int, int);
+-/* only for net: no internal synchronization */
+-extern void __kill_fasync(struct fasync_struct *, int, int);
+-
+-extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
+-extern int f_setown(struct file *filp, unsigned long arg, int force);
+-extern void f_delown(struct file *filp);
+-extern pid_t f_getown(struct file *filp);
+-extern int send_sigurg(struct fown_struct *fown);
+-
+-/*
+- * Umount options
+- */
+-
+-#define MNT_FORCE 0x00000001 /* Attempt to forcibily umount */
+-#define MNT_DETACH 0x00000002 /* Just detach from the tree */
+-#define MNT_EXPIRE 0x00000004 /* Mark for expiry */
+-
+-extern struct list_head super_blocks;
+-extern spinlock_t sb_lock;
+-
+-#define sb_entry(list) list_entry((list), struct super_block, s_list)
+-#define S_BIAS (1<<30)
+-struct super_block {
+- struct list_head s_list; /* Keep this first */
+- dev_t s_dev; /* search index; _not_ kdev_t */
+- unsigned long s_blocksize;
+- unsigned char s_blocksize_bits;
+- unsigned char s_dirt;
+- unsigned long long s_maxbytes; /* Max file size */
+- struct file_system_type *s_type;
+- const struct super_operations *s_op;
+- struct dquot_operations *dq_op;
+- struct quotactl_ops *s_qcop;
+- const struct export_operations *s_export_op;
+- unsigned long s_flags;
+- unsigned long s_magic;
+- struct dentry *s_root;
+- struct rw_semaphore s_umount;
+- struct mutex s_lock;
+- int s_count;
+- int s_need_sync_fs;
+- atomic_t s_active;
+-#ifdef CONFIG_SECURITY
+- void *s_security;
+-#endif
+- struct xattr_handler **s_xattr;
+-
+- struct list_head s_inodes; /* all inodes */
+- struct list_head s_dirty; /* dirty inodes */
+- struct list_head s_io; /* parked for writeback */
+- struct list_head s_more_io; /* parked for more writeback */
+- struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */
+- struct list_head s_files;
+- /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
+- struct list_head s_dentry_lru; /* unused dentry lru */
+- int s_nr_dentry_unused; /* # of dentry on lru */
+-
+- struct block_device *s_bdev;
+- struct mtd_info *s_mtd;
+- struct list_head s_instances;
+- struct quota_info s_dquot; /* Diskquota specific options */
+-
+- int s_frozen;
+- wait_queue_head_t s_wait_unfrozen;
+-
+- char s_id[32]; /* Informational name */
+-
+- void *s_fs_info; /* Filesystem private info */
+-
+- /*
+- * The next field is for VFS *only*. No filesystems have any business
+- * even looking at it. You had been warned.
+- */
+- struct mutex s_vfs_rename_mutex; /* Kludge */
+-
+- /* Granularity of c/m/atime in ns.
+- Cannot be worse than a second */
+- u32 s_time_gran;
+-
+- /*
+- * Filesystem subtype. If non-empty the filesystem type field
+- * in /proc/mounts will be "type.subtype"
+- */
+- char *s_subtype;
+-
+- /*
+- * Saved mount options for lazy filesystems using
+- * generic_show_options()
+- */
+- char *s_options;
+-};
+-
+-extern struct timespec current_fs_time(struct super_block *sb);
+-
+-/*
+- * Snapshotting support.
+- */
+-enum {
+- SB_UNFROZEN = 0,
+- SB_FREEZE_WRITE = 1,
+- SB_FREEZE_TRANS = 2,
+-};
+-
+-#define vfs_check_frozen(sb, level) \
+- wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))
+-
+-#define get_fs_excl() atomic_inc(&current->fs_excl)
+-#define put_fs_excl() atomic_dec(&current->fs_excl)
+-#define has_fs_excl() atomic_read(&current->fs_excl)
+-
+-#define is_owner_or_cap(inode) \
+- ((current->fsuid == (inode)->i_uid) || capable(CAP_FOWNER))
+-
+-/* not quite ready to be deprecated, but... */
+-extern void lock_super(struct super_block *);
+-extern void unlock_super(struct super_block *);
+-
+-/*
+- * VFS helper functions..
+- */
+-extern int vfs_permission(struct nameidata *, int);
+-extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
+-extern int vfs_mkdir(struct inode *, struct dentry *, int);
+-extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
+-extern int vfs_symlink(struct inode *, struct dentry *, const char *);
+-extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
+-extern int vfs_rmdir(struct inode *, struct dentry *);
+-extern int vfs_unlink(struct inode *, struct dentry *);
+-extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+-
+-/*
+- * VFS dentry helper functions.
+- */
+-extern void dentry_unhash(struct dentry *dentry);
+-
+-/*
+- * VFS file helper functions.
+- */
+-extern int file_permission(struct file *, int);
+-
+-/*
+- * File types
+- *
+- * NOTE! These match bits 12..15 of stat.st_mode
+- * (ie "(i_mode >> 12) & 15").
+- */
+-#define DT_UNKNOWN 0
+-#define DT_FIFO 1
+-#define DT_CHR 2
+-#define DT_DIR 4
+-#define DT_BLK 6
+-#define DT_REG 8
+-#define DT_LNK 10
+-#define DT_SOCK 12
+-#define DT_WHT 14
+-
+-#define OSYNC_METADATA (1<<0)
+-#define OSYNC_DATA (1<<1)
+-#define OSYNC_INODE (1<<2)
+-int generic_osync_inode(struct inode *, struct address_space *, int);
+-
+-/*
+- * This is the "filldir" function type, used by readdir() to let
+- * the kernel specify what kind of dirent layout it wants to have.
+- * This allows the kernel to read directories into kernel space or
+- * to have different dirent layouts depending on the binary type.
+- */
+-typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
+-
+-struct block_device_operations {
+- int (*open) (struct inode *, struct file *);
+- int (*release) (struct inode *, struct file *);
+- int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
+- long (*unlocked_ioctl) (struct file *, unsigned, unsigned long);
+- long (*compat_ioctl) (struct file *, unsigned, unsigned long);
+- int (*direct_access) (struct block_device *, sector_t,
+- void **, unsigned long *);
+- int (*media_changed) (struct gendisk *);
+- int (*revalidate_disk) (struct gendisk *);
+- int (*getgeo)(struct block_device *, struct hd_geometry *);
+- struct module *owner;
+-};
+-
+-/* These macros are for out of kernel modules to test that
+- * the kernel supports the unlocked_ioctl and compat_ioctl
+- * fields in struct file_operations. */
+-#define HAVE_COMPAT_IOCTL 1
+-#define HAVE_UNLOCKED_IOCTL 1
+-
+-/*
+- * NOTE:
+- * read, write, poll, fsync, readv, writev, unlocked_ioctl and compat_ioctl
+- * can be called without the big kernel lock held in all filesystems.
+- */
+-struct file_operations {
+- struct module *owner;
+- loff_t (*llseek) (struct file *, loff_t, int);
+- ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
+- ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
+- ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+- ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+- int (*readdir) (struct file *, void *, filldir_t);
+- unsigned int (*poll) (struct file *, struct poll_table_struct *);
+- int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
+- long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
+- long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
+- int (*mmap) (struct file *, struct vm_area_struct *);
+- int (*open) (struct inode *, struct file *);
+- int (*flush) (struct file *, fl_owner_t id);
+- int (*release) (struct inode *, struct file *);
+- int (*fsync) (struct file *, struct dentry *, int datasync);
+- int (*aio_fsync) (struct kiocb *, int datasync);
+- int (*fasync) (int, struct file *, int);
+- int (*lock) (struct file *, int, struct file_lock *);
+- ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
+- unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+- int (*check_flags)(int);
+- int (*dir_notify)(struct file *filp, unsigned long arg);
+- int (*flock) (struct file *, int, struct file_lock *);
+- ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
+- ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
+- int (*setlease)(struct file *, long, struct file_lock **);
+-};
+-
+-struct inode_operations {
+- int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
+- struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
+- int (*link) (struct dentry *,struct inode *,struct dentry *);
+- int (*unlink) (struct inode *,struct dentry *);
+- int (*symlink) (struct inode *,struct dentry *,const char *);
+- int (*mkdir) (struct inode *,struct dentry *,int);
+- int (*rmdir) (struct inode *,struct dentry *);
+- int (*mknod) (struct inode *,struct dentry *,int,dev_t);
+- int (*rename) (struct inode *, struct dentry *,
+- struct inode *, struct dentry *);
+- int (*readlink) (struct dentry *, char __user *,int);
+- void * (*follow_link) (struct dentry *, struct nameidata *);
+- void (*put_link) (struct dentry *, struct nameidata *, void *);
+- void (*truncate) (struct inode *);
+- int (*permission) (struct inode *, int);
+- int (*setattr) (struct dentry *, struct iattr *);
+- int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
+- int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
+- ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
+- ssize_t (*listxattr) (struct dentry *, char *, size_t);
+- int (*removexattr) (struct dentry *, const char *);
+- void (*truncate_range)(struct inode *, loff_t, loff_t);
+- long (*fallocate)(struct inode *inode, int mode, loff_t offset,
+- loff_t len);
+- int (*sync_flags) (struct inode *);
+-};
+-
+-struct seq_file;
+-
+-ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
+- unsigned long nr_segs, unsigned long fast_segs,
+- struct iovec *fast_pointer,
+- struct iovec **ret_pointer);
+-
+-extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
+-extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
+-extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
+- unsigned long, loff_t *);
+-extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
+- unsigned long, loff_t *);
+-ssize_t vfs_sendfile(struct file *, struct file *, loff_t *, size_t, loff_t);
+-
+-struct super_operations {
+- struct inode *(*alloc_inode)(struct super_block *sb);
+- void (*destroy_inode)(struct inode *);
+-
+- void (*dirty_inode) (struct inode *);
+- int (*write_inode) (struct inode *, int);
+- void (*drop_inode) (struct inode *);
+- void (*delete_inode) (struct inode *);
+- void (*put_super) (struct super_block *);
+- void (*write_super) (struct super_block *);
+- int (*sync_fs)(struct super_block *sb, int wait);
+- void (*write_super_lockfs) (struct super_block *);
+- void (*unlockfs) (struct super_block *);
+- int (*statfs) (struct dentry *, struct kstatfs *);
+- int (*remount_fs) (struct super_block *, int *, char *);
+- void (*clear_inode) (struct inode *);
+- void (*umount_begin) (struct super_block *);
+-
+- int (*show_options)(struct seq_file *, struct vfsmount *);
+- int (*show_stats)(struct seq_file *, struct vfsmount *);
+-#ifdef CONFIG_QUOTA
+- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
+- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
+-#endif
+-};
+-
+-/*
+- * Inode state bits. Protected by inode_lock.
+- *
+- * Three bits determine the dirty state of the inode, I_DIRTY_SYNC,
+- * I_DIRTY_DATASYNC and I_DIRTY_PAGES.
+- *
+- * Four bits define the lifetime of an inode. Initially, inodes are I_NEW,
+- * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
+- * various stages of removing an inode.
+- *
+- * Two bits are used for locking and completion notification, I_LOCK and I_SYNC.
+- *
+- * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
+- * fdatasync(). i_atime is the usual cause.
+- * I_DIRTY_DATASYNC Data-related inode changes pending. We keep track of
+- * these changes separately from I_DIRTY_SYNC so that we
+- * don't have to write inode on fdatasync() when only
+- * mtime has changed in it.
+- * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
+- * I_NEW get_new_inode() sets i_state to I_LOCK|I_NEW. Both
+- * are cleared by unlock_new_inode(), called from iget().
+- * I_WILL_FREE Must be set when calling write_inode_now() if i_count
+- * is zero. I_FREEING must be set when I_WILL_FREE is
+- * cleared.
+- * I_FREEING Set when inode is about to be freed but still has dirty
+- * pages or buffers attached or the inode itself is still
+- * dirty.
+- * I_CLEAR Set by clear_inode(). In this state the inode is clean
+- * and can be destroyed.
+- *
+- * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
+- * prohibited for many purposes. iget() must wait for
+- * the inode to be completely released, then create it
+- * anew. Other functions will just ignore such inodes,
+- * if appropriate. I_LOCK is used for waiting.
+- *
+- * I_LOCK Serves as both a mutex and completion notification.
+- * New inodes set I_LOCK. If two processes both create
+- * the same inode, one of them will release its inode and
+- * wait for I_LOCK to be released before returning.
+- * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
+- * also cause waiting on I_LOCK, without I_LOCK actually
+- * being set. find_inode() uses this to prevent returning
+- * nearly-dead inodes.
+- * I_SYNC Similar to I_LOCK, but limited in scope to writeback
+- * of inode dirty data. Having a separate lock for this
+- * purpose reduces latency and prevents some filesystem-
+- * specific deadlocks.
+- *
+- * Q: What is the difference between I_WILL_FREE and I_FREEING?
+- * Q: igrab() only checks on (I_FREEING|I_WILL_FREE). Should it also check on
+- * I_CLEAR? If not, why?
+- */
+-#define I_DIRTY_SYNC 1
+-#define I_DIRTY_DATASYNC 2
+-#define I_DIRTY_PAGES 4
+-#define I_NEW 8
+-#define I_WILL_FREE 16
+-#define I_FREEING 32
+-#define I_CLEAR 64
+-#define __I_LOCK 7
+-#define I_LOCK (1 << __I_LOCK)
+-#define __I_SYNC 8
+-#define I_SYNC (1 << __I_SYNC)
+-
+-#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
+-
+-extern void __mark_inode_dirty(struct inode *, int);
+-static inline void mark_inode_dirty(struct inode *inode)
+-{
+- __mark_inode_dirty(inode, I_DIRTY);
+-}
+-
+-static inline void mark_inode_dirty_sync(struct inode *inode)
+-{
+- __mark_inode_dirty(inode, I_DIRTY_SYNC);
+-}
+-
+-/**
+- * inc_nlink - directly increment an inode's link count
+- * @inode: inode
+- *
+- * This is a low-level filesystem helper to replace any
+- * direct filesystem manipulation of i_nlink. Currently,
+- * it is only here for parity with dec_nlink().
+- */
+-static inline void inc_nlink(struct inode *inode)
+-{
+- inode->i_nlink++;
+-}
+-
+-static inline void inode_inc_link_count(struct inode *inode)
+-{
+- inc_nlink(inode);
+- mark_inode_dirty(inode);
+-}
+-
+-/**
+- * drop_nlink - directly drop an inode's link count
+- * @inode: inode
+- *
+- * This is a low-level filesystem helper to replace any
+- * direct filesystem manipulation of i_nlink. In cases
+- * where we are attempting to track writes to the
+- * filesystem, a decrement to zero means an imminent
+- * write when the file is truncated and actually unlinked
+- * on the filesystem.
+- */
+-static inline void drop_nlink(struct inode *inode)
+-{
+- inode->i_nlink--;
+-}
+-
+-/**
+- * clear_nlink - directly zero an inode's link count
+- * @inode: inode
+- *
+- * This is a low-level filesystem helper to replace any
+- * direct filesystem manipulation of i_nlink. See
+- * drop_nlink() for why we care about i_nlink hitting zero.
+- */
+-static inline void clear_nlink(struct inode *inode)
+-{
+- inode->i_nlink = 0;
+-}
+-
+-static inline void inode_dec_link_count(struct inode *inode)
+-{
+- drop_nlink(inode);
+- mark_inode_dirty(inode);
+-}
+-
+-/**
+- * inode_inc_iversion - increments i_version
+- * @inode: inode that need to be updated
+- *
+- * Every time the inode is modified, the i_version field will be incremented.
+- * The filesystem has to be mounted with i_version flag
+- */
+-
+-static inline void inode_inc_iversion(struct inode *inode)
+-{
+- spin_lock(&inode->i_lock);
+- inode->i_version++;
+- spin_unlock(&inode->i_lock);
+-}
+-
+-extern void touch_atime(struct vfsmount *mnt, struct dentry *dentry);
+-static inline void file_accessed(struct file *file)
+-{
+- if (!(file->f_flags & O_NOATIME))
+- touch_atime(file->f_path.mnt, file->f_path.dentry);
+-}
+-
+-int sync_inode(struct inode *inode, struct writeback_control *wbc);
+-
+-struct file_system_type {
+- const char *name;
+- int fs_flags;
+- int (*get_sb) (struct file_system_type *, int,
+- const char *, void *, struct vfsmount *);
+- void (*kill_sb) (struct super_block *);
+- struct module *owner;
+- struct file_system_type * next;
+- struct list_head fs_supers;
+-
+- struct lock_class_key s_lock_key;
+- struct lock_class_key s_umount_key;
+-
+- struct lock_class_key i_lock_key;
+- struct lock_class_key i_mutex_key;
+- struct lock_class_key i_mutex_dir_key;
+- struct lock_class_key i_alloc_sem_key;
+-};
+-
+-extern int get_sb_bdev(struct file_system_type *fs_type,
+- int flags, const char *dev_name, void *data,
+- int (*fill_super)(struct super_block *, void *, int),
+- struct vfsmount *mnt);
+-extern int get_sb_single(struct file_system_type *fs_type,
+- int flags, void *data,
+- int (*fill_super)(struct super_block *, void *, int),
+- struct vfsmount *mnt);
+-extern int get_sb_nodev(struct file_system_type *fs_type,
+- int flags, void *data,
+- int (*fill_super)(struct super_block *, void *, int),
+- struct vfsmount *mnt);
+-void generic_shutdown_super(struct super_block *sb);
+-void kill_block_super(struct super_block *sb);
+-void kill_anon_super(struct super_block *sb);
+-void kill_litter_super(struct super_block *sb);
+-void deactivate_super(struct super_block *sb);
+-int set_anon_super(struct super_block *s, void *data);
+-struct super_block *sget(struct file_system_type *type,
+- int (*test)(struct super_block *,void *),
+- int (*set)(struct super_block *,void *),
+- void *data);
+-extern int get_sb_pseudo(struct file_system_type *, char *,
+- const struct super_operations *ops, unsigned long,
+- struct vfsmount *mnt);
+-extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
+-int __put_super_and_need_restart(struct super_block *sb);
+-void unnamed_dev_init(void);
+-
+-/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
+-#define fops_get(fops) \
+- (((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
+-#define fops_put(fops) \
+- do { if (fops) module_put((fops)->owner); } while(0)
+-
+-extern int register_filesystem(struct file_system_type *);
+-extern int unregister_filesystem(struct file_system_type *);
+-extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
+-#define kern_mount(type) kern_mount_data(type, NULL)
+-extern int may_umount_tree(struct vfsmount *);
+-extern int may_umount(struct vfsmount *);
+-extern long do_mount(char *, char *, char *, unsigned long, void *);
+-extern struct vfsmount *collect_mounts(struct vfsmount *, struct dentry *);
+-extern void drop_collected_mounts(struct vfsmount *);
+-
+-extern int vfs_statfs(struct dentry *, struct kstatfs *);
+-
+-/* /sys/fs */
+-extern struct kobject *fs_kobj;
+-
+-#define FLOCK_VERIFY_READ 1
+-#define FLOCK_VERIFY_WRITE 2
+-
+-extern int locks_mandatory_locked(struct inode *);
+-extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
+-
+-/*
+- * Candidates for mandatory locking have the setgid bit set
+- * but no group execute bit - an otherwise meaningless combination.
+- */
+-
+-static inline int __mandatory_lock(struct inode *ino)
+-{
+- return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID;
+-}
+-
+-/*
+- * ... and these candidates should be on MS_MANDLOCK mounted fs,
+- * otherwise these will be advisory locks
+- */
+-
+-static inline int mandatory_lock(struct inode *ino)
+-{
+- return IS_MANDLOCK(ino) && __mandatory_lock(ino);
+-}
+-
+-static inline int locks_verify_locked(struct inode *inode)
+-{
+- if (mandatory_lock(inode))
+- return locks_mandatory_locked(inode);
+- return 0;
+-}
+-
+-extern int rw_verify_area(int, struct file *, loff_t *, size_t);
+-
+-static inline int locks_verify_truncate(struct inode *inode,
+- struct file *filp,
+- loff_t size)
+-{
+- if (inode->i_flock && mandatory_lock(inode))
+- return locks_mandatory_area(
+- FLOCK_VERIFY_WRITE, inode, filp,
+- size < inode->i_size ? size : inode->i_size,
+- (size < inode->i_size ? inode->i_size - size
+- : size - inode->i_size)
+- );
+- return 0;
+-}
+-
+-static inline int break_lease(struct inode *inode, unsigned int mode)
+-{
+- if (inode->i_flock)
+- return __break_lease(inode, mode);
+- return 0;
+-}
+-
+-/* fs/open.c */
+-
+-extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
+- struct file *filp);
+-extern long do_sys_open(int dfd, const char __user *filename, int flags,
+- int mode);
+-extern struct file *filp_open(const char *, int, int);
+-extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
+-extern int filp_close(struct file *, fl_owner_t id);
+-extern char * getname(const char __user *);
+-
+-/* fs/dcache.c */
+-extern void __init vfs_caches_init_early(void);
+-extern void __init vfs_caches_init(unsigned long);
+-
+-extern struct kmem_cache *names_cachep;
+-
+-#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL)
+-#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
+-#ifndef CONFIG_AUDITSYSCALL
+-#define putname(name) __putname(name)
+-#else
+-extern void putname(const char *name);
+-#endif
+-
+-#ifdef CONFIG_BLOCK
+-extern int register_blkdev(unsigned int, const char *);
+-extern void unregister_blkdev(unsigned int, const char *);
+-extern struct block_device *bdget(dev_t);
+-extern void bd_set_size(struct block_device *, loff_t size);
+-extern void bd_forget(struct inode *inode);
+-extern void bdput(struct block_device *);
+-extern struct block_device *open_by_devnum(dev_t, unsigned);
+-#else
+-static inline void bd_forget(struct inode *inode) {}
+-#endif
+-extern const struct file_operations def_blk_fops;
+-extern const struct file_operations def_chr_fops;
+-extern const struct file_operations bad_sock_fops;
+-extern const struct file_operations def_fifo_fops;
+-#ifdef CONFIG_BLOCK
+-extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
+-extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long);
+-extern int blkdev_driver_ioctl(struct inode *inode, struct file *file,
+- struct gendisk *disk, unsigned cmd,
+- unsigned long arg);
+-extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
+-extern int blkdev_get(struct block_device *, mode_t, unsigned);
+-extern int blkdev_put(struct block_device *);
+-extern int bd_claim(struct block_device *, void *);
+-extern void bd_release(struct block_device *);
+-#ifdef CONFIG_SYSFS
+-extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *);
+-extern void bd_release_from_disk(struct block_device *, struct gendisk *);
+-#else
+-#define bd_claim_by_disk(bdev, holder, disk) bd_claim(bdev, holder)
+-#define bd_release_from_disk(bdev, disk) bd_release(bdev)
+-#endif
+-#endif
+-
+-/* fs/char_dev.c */
+-#define CHRDEV_MAJOR_HASH_SIZE 255
+-extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
+-extern int register_chrdev_region(dev_t, unsigned, const char *);
+-extern int register_chrdev(unsigned int, const char *,
+- const struct file_operations *);
+-extern void unregister_chrdev(unsigned int, const char *);
+-extern void unregister_chrdev_region(dev_t, unsigned);
+-extern void chrdev_show(struct seq_file *,off_t);
+-
+-/* fs/block_dev.c */
+-#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
+-
+-#ifdef CONFIG_BLOCK
+-#define BLKDEV_MAJOR_HASH_SIZE 255
+-extern const char *__bdevname(dev_t, char *buffer);
+-extern const char *bdevname(struct block_device *bdev, char *buffer);
+-extern struct block_device *lookup_bdev(const char *);
+-extern struct block_device *open_bdev_excl(const char *, int, void *);
+-extern void close_bdev_excl(struct block_device *);
+-extern void blkdev_show(struct seq_file *,off_t);
+-#else
+-#define BLKDEV_MAJOR_HASH_SIZE 0
+-#endif
+-
+-extern void init_special_inode(struct inode *, umode_t, dev_t);
+-
+-/* Invalid inode operations -- fs/bad_inode.c */
+-extern void make_bad_inode(struct inode *);
+-extern int is_bad_inode(struct inode *);
+-
+-extern const struct file_operations read_pipefifo_fops;
+-extern const struct file_operations write_pipefifo_fops;
+-extern const struct file_operations rdwr_pipefifo_fops;
+-
+-extern int fs_may_remount_ro(struct super_block *);
+-
+-#ifdef CONFIG_BLOCK
+-/*
+- * return READ, READA, or WRITE
+- */
+-#define bio_rw(bio) ((bio)->bi_rw & (RW_MASK | RWA_MASK))
+-
+-/*
+- * return data direction, READ or WRITE
+- */
+-#define bio_data_dir(bio) ((bio)->bi_rw & 1)
+-
+-extern int check_disk_change(struct block_device *);
+-extern int __invalidate_device(struct block_device *);
+-extern int invalidate_partition(struct gendisk *, int);
+-#endif
+-extern int invalidate_inodes(struct super_block *);
+-unsigned long __invalidate_mapping_pages(struct address_space *mapping,
+- pgoff_t start, pgoff_t end,
+- bool be_atomic);
+-unsigned long invalidate_mapping_pages(struct address_space *mapping,
+- pgoff_t start, pgoff_t end);
+-
+-static inline unsigned long __deprecated
+-invalidate_inode_pages(struct address_space *mapping)
+-{
+- return invalidate_mapping_pages(mapping, 0, ~0UL);
+-}
+-
+-static inline void invalidate_remote_inode(struct inode *inode)
+-{
+- if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+- S_ISLNK(inode->i_mode))
+- invalidate_mapping_pages(inode->i_mapping, 0, -1);
+-}
+-extern int invalidate_inode_pages2(struct address_space *mapping);
+-extern int invalidate_inode_pages2_range(struct address_space *mapping,
+- pgoff_t start, pgoff_t end);
+-extern void generic_sync_sb_inodes(struct super_block *sb,
+- struct writeback_control *wbc);
+-extern int write_inode_now(struct inode *, int);
+-extern int filemap_fdatawrite(struct address_space *);
+-extern int filemap_flush(struct address_space *);
+-extern int filemap_fdatawait(struct address_space *);
+-extern int filemap_write_and_wait(struct address_space *mapping);
+-extern int filemap_write_and_wait_range(struct address_space *mapping,
+- loff_t lstart, loff_t lend);
+-extern int wait_on_page_writeback_range(struct address_space *mapping,
+- pgoff_t start, pgoff_t end);
+-extern int __filemap_fdatawrite_range(struct address_space *mapping,
+- loff_t start, loff_t end, int sync_mode);
+-extern int filemap_fdatawrite_range(struct address_space *mapping,
+- loff_t start, loff_t end);
+-
+-extern long do_fsync(struct file *file, int datasync);
+-extern void sync_supers(void);
+-extern void sync_filesystems(int wait);
+-extern void __fsync_super(struct super_block *sb);
+-extern void emergency_sync(void);
+-extern void emergency_remount(void);
+-extern int do_remount_sb(struct super_block *sb, int flags,
+- void *data, int force);
+-#ifdef CONFIG_BLOCK
+-extern sector_t bmap(struct inode *, sector_t);
+-#endif
+-extern int notify_change(struct dentry *, struct iattr *);
+-extern int inode_permission(struct inode *, int);
+-extern int generic_permission(struct inode *, int,
+- int (*check_acl)(struct inode *, int));
+-
+-extern int get_write_access(struct inode *);
+-extern int deny_write_access(struct file *);
+-static inline void put_write_access(struct inode * inode)
+-{
+- atomic_dec(&inode->i_writecount);
+-}
+-static inline void allow_write_access(struct file *file)
+-{
+- if (file)
+- atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
+-}
+-extern int do_pipe(int *);
+-extern int do_pipe_flags(int *, int);
+-extern struct file *create_read_pipe(struct file *f, int flags);
+-extern struct file *create_write_pipe(int flags);
+-extern void free_write_pipe(struct file *);
+-
+-extern struct file *do_filp_open(int dfd, const char *pathname,
+- int open_flag, int mode);
+-extern int may_open(struct nameidata *, int, int);
+-
+-extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
+-extern struct file * open_exec(const char *);
+-
+-/* fs/dcache.c -- generic fs support functions */
+-extern int is_subdir(struct dentry *, struct dentry *);
+-extern ino_t find_inode_number(struct dentry *, struct qstr *);
+-
+-#include <linux/err.h>
+-
+-/* needed for stackable file system support */
+-extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
+-
+-extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
+-
+-extern void inode_init_once(struct inode *);
+-extern void iput(struct inode *);
+-extern struct inode * igrab(struct inode *);
+-extern ino_t iunique(struct super_block *, ino_t);
+-extern int inode_needs_sync(struct inode *inode);
+-extern void generic_delete_inode(struct inode *inode);
+-extern void generic_drop_inode(struct inode *inode);
+-
+-extern struct inode *ilookup5_nowait(struct super_block *sb,
+- unsigned long hashval, int (*test)(struct inode *, void *),
+- void *data);
+-extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
+- int (*test)(struct inode *, void *), void *data);
+-extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
+-
+-extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
+-extern struct inode * iget_locked(struct super_block *, unsigned long);
+-extern void unlock_new_inode(struct inode *);
+-
+-extern void __iget(struct inode * inode);
+-extern void iget_failed(struct inode *);
+-extern void clear_inode(struct inode *);
+-extern void destroy_inode(struct inode *);
+-extern struct inode *new_inode(struct super_block *);
+-extern int should_remove_suid(struct dentry *);
+-extern int file_remove_suid(struct file *);
+-
+-extern void __insert_inode_hash(struct inode *, unsigned long hashval);
+-extern void remove_inode_hash(struct inode *);
+-static inline void insert_inode_hash(struct inode *inode) {
+- __insert_inode_hash(inode, inode->i_ino);
+-}
+-
+-extern struct file * get_empty_filp(void);
+-extern void file_move(struct file *f, struct list_head *list);
+-extern void file_kill(struct file *f);
+-#ifdef CONFIG_BLOCK
+-struct bio;
+-extern void submit_bio(int, struct bio *);
+-extern int bdev_read_only(struct block_device *);
+-#endif
+-extern int set_blocksize(struct block_device *, int);
+-extern int sb_set_blocksize(struct super_block *, int);
+-extern int sb_min_blocksize(struct super_block *, int);
+-extern int sb_has_dirty_inodes(struct super_block *);
+-
+-extern int generic_file_mmap(struct file *, struct vm_area_struct *);
+-extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
+-extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
+-int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
+-extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+-extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+-extern ssize_t generic_file_aio_write_nolock(struct kiocb *, const struct iovec *,
+- unsigned long, loff_t);
+-extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
+- unsigned long *, loff_t, loff_t *, size_t, size_t);
+-extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
+- unsigned long, loff_t, loff_t *, size_t, ssize_t);
+-extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
+-extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
+-extern int generic_segment_checks(const struct iovec *iov,
+- unsigned long *nr_segs, size_t *count, int access_flags);
+-
+-/* fs/splice.c */
+-extern ssize_t generic_file_splice_read(struct file *, loff_t *,
+- struct pipe_inode_info *, size_t, unsigned int);
+-extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
+- struct file *, loff_t *, size_t, unsigned int);
+-extern ssize_t generic_file_splice_write_nolock(struct pipe_inode_info *,
+- struct file *, loff_t *, size_t, unsigned int);
+-extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
+- struct file *out, loff_t *, size_t len, unsigned int flags);
+-extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+- size_t len, unsigned int flags);
+-
+-extern void
+-file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
+-extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
+-extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
+-extern loff_t generic_file_llseek_unlocked(struct file *file, loff_t offset,
+- int origin);
+-extern int generic_file_open(struct inode * inode, struct file * filp);
+-extern int nonseekable_open(struct inode * inode, struct file * filp);
+-
+-#ifdef CONFIG_FS_XIP
+-extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len,
+- loff_t *ppos);
+-extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
+-extern ssize_t xip_file_write(struct file *filp, const char __user *buf,
+- size_t len, loff_t *ppos);
+-extern int xip_truncate_page(struct address_space *mapping, loff_t from);
+-#else
+-static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
+-{
+- return 0;
+-}
+-#endif
+-
+-#ifdef CONFIG_BLOCK
+-ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
+- struct block_device *bdev, const struct iovec *iov, loff_t offset,
+- unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
+- int lock_type);
+-
+-enum {
+- DIO_LOCKING = 1, /* need locking between buffered and direct access */
+- DIO_NO_LOCKING, /* bdev; no locking at all between buffered/direct */
+- DIO_OWN_LOCKING, /* filesystem locks buffered and direct internally */
+-};
+-
+-static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
+- struct inode *inode, struct block_device *bdev, const struct iovec *iov,
+- loff_t offset, unsigned long nr_segs, get_block_t get_block,
+- dio_iodone_t end_io)
+-{
+- return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
+- nr_segs, get_block, end_io, DIO_LOCKING);
+-}
+-
+-static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
+- struct inode *inode, struct block_device *bdev, const struct iovec *iov,
+- loff_t offset, unsigned long nr_segs, get_block_t get_block,
+- dio_iodone_t end_io)
+-{
+- return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
+- nr_segs, get_block, end_io, DIO_NO_LOCKING);
+-}
+-
+-static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
+- struct inode *inode, struct block_device *bdev, const struct iovec *iov,
+- loff_t offset, unsigned long nr_segs, get_block_t get_block,
+- dio_iodone_t end_io)
+-{
+- return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
+- nr_segs, get_block, end_io, DIO_OWN_LOCKING);
+-}
+-#endif
+-
+-extern const struct file_operations generic_ro_fops;
+-
+-#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
+-
+-extern int vfs_readlink(struct dentry *, char __user *, int, const char *);
+-extern int vfs_follow_link(struct nameidata *, const char *);
+-extern int page_readlink(struct dentry *, char __user *, int);
+-extern void *page_follow_link_light(struct dentry *, struct nameidata *);
+-extern void page_put_link(struct dentry *, struct nameidata *, void *);
+-extern int __page_symlink(struct inode *inode, const char *symname, int len,
+- int nofs);
+-extern int page_symlink(struct inode *inode, const char *symname, int len);
+-extern const struct inode_operations page_symlink_inode_operations;
+-extern int generic_readlink(struct dentry *, char __user *, int);
+-extern void generic_fillattr(struct inode *, struct kstat *);
+-extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+-void inode_add_bytes(struct inode *inode, loff_t bytes);
+-void inode_sub_bytes(struct inode *inode, loff_t bytes);
+-loff_t inode_get_bytes(struct inode *inode);
+-void inode_set_bytes(struct inode *inode, loff_t bytes);
+-
+-extern int vfs_readdir(struct file *, filldir_t, void *);
+-
+-extern int vfs_stat(char __user *, struct kstat *);
+-extern int vfs_lstat(char __user *, struct kstat *);
+-extern int vfs_stat_fd(int dfd, char __user *, struct kstat *);
+-extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *);
+-extern int vfs_fstat(unsigned int, struct kstat *);
+-
+-extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
+- unsigned long arg);
+-
+-extern void get_filesystem(struct file_system_type *fs);
+-extern void put_filesystem(struct file_system_type *fs);
+-extern struct file_system_type *get_fs_type(const char *name);
+-extern struct super_block *get_super(struct block_device *);
+-extern struct super_block *user_get_super(dev_t);
+-extern void drop_super(struct super_block *sb);
+-
+-extern int dcache_dir_open(struct inode *, struct file *);
+-extern int dcache_dir_close(struct inode *, struct file *);
+-extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
+-extern int dcache_readdir(struct file *, void *, filldir_t);
+-extern int dcache_readdir_filter(struct file *, void *, filldir_t, int (*)(struct dentry *));
+-extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+-extern int simple_statfs(struct dentry *, struct kstatfs *);
+-extern int simple_link(struct dentry *, struct inode *, struct dentry *);
+-extern int simple_unlink(struct inode *, struct dentry *);
+-extern int simple_rmdir(struct inode *, struct dentry *);
+-extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+-extern int simple_sync_file(struct file *, struct dentry *, int);
+-extern int simple_empty(struct dentry *);
+-extern int simple_readpage(struct file *file, struct page *page);
+-extern int simple_prepare_write(struct file *file, struct page *page,
+- unsigned offset, unsigned to);
+-extern int simple_write_begin(struct file *file, struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned flags,
+- struct page **pagep, void **fsdata);
+-extern int simple_write_end(struct file *file, struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned copied,
+- struct page *page, void *fsdata);
+-
+-extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *);
+-extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
+-extern const struct file_operations simple_dir_operations;
+-extern const struct inode_operations simple_dir_inode_operations;
+-struct tree_descr { char *name; const struct file_operations *ops; int mode; };
+-struct dentry *d_alloc_name(struct dentry *, const char *);
+-extern int simple_fill_super(struct super_block *, int, struct tree_descr *);
+-extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
+-extern void simple_release_fs(struct vfsmount **mount, int *count);
+-
+-extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
+- loff_t *ppos, const void *from, size_t available);
+-
+-#ifdef CONFIG_MIGRATION
+-extern int buffer_migrate_page(struct address_space *,
+- struct page *, struct page *);
+-#else
+-#define buffer_migrate_page NULL
+-#endif
+-
+-extern int inode_change_ok(struct inode *, struct iattr *);
+-extern int __must_check inode_setattr(struct inode *, struct iattr *);
+-
+-extern void file_update_time(struct file *file);
+-
+-extern int generic_show_options(struct seq_file *m, struct vfsmount *mnt);
+-extern void save_mount_options(struct super_block *sb, char *options);
+-
+-static inline ino_t parent_ino(struct dentry *dentry)
+-{
+- ino_t res;
+-
+- spin_lock(&dentry->d_lock);
+- res = dentry->d_parent->d_inode->i_ino;
+- spin_unlock(&dentry->d_lock);
+- return res;
+-}
+-
+-/* Transaction based IO helpers */
+-
+-/*
+- * An argresp is stored in an allocated page and holds the
+- * size of the argument or response, along with its content
+- */
+-struct simple_transaction_argresp {
+- ssize_t size;
+- char data[0];
+-};
+-
+-#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
+-
+-char *simple_transaction_get(struct file *file, const char __user *buf,
+- size_t size);
+-ssize_t simple_transaction_read(struct file *file, char __user *buf,
+- size_t size, loff_t *pos);
+-int simple_transaction_release(struct inode *inode, struct file *file);
+-
+-static inline void simple_transaction_set(struct file *file, size_t n)
+-{
+- struct simple_transaction_argresp *ar = file->private_data;
+-
+- BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);
+-
+- /*
+- * The barrier ensures that ar->size will really remain zero until
+- * ar->data is ready for reading.
+- */
+- smp_mb();
+- ar->size = n;
+-}
+-
+-/*
+- * simple attribute files
+- *
+- * These attributes behave similar to those in sysfs:
+- *
+- * Writing to an attribute immediately sets a value, an open file can be
+- * written to multiple times.
+- *
+- * Reading from an attribute creates a buffer from the value that might get
+- * read with multiple read calls. When the attribute has been read
+- * completely, no further read calls are possible until the file is opened
+- * again.
+- *
+- * All attributes contain a text representation of a numeric value
+- * that are accessed with the get() and set() functions.
+- */
+-#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
+-static int __fops ## _open(struct inode *inode, struct file *file) \
+-{ \
+- __simple_attr_check_format(__fmt, 0ull); \
+- return simple_attr_open(inode, file, __get, __set, __fmt); \
+-} \
+-static struct file_operations __fops = { \
+- .owner = THIS_MODULE, \
+- .open = __fops ## _open, \
+- .release = simple_attr_release, \
+- .read = simple_attr_read, \
+- .write = simple_attr_write, \
+-};
+-
+-static inline void __attribute__((format(printf, 1, 2)))
+-__simple_attr_check_format(const char *fmt, ...)
+-{
+- /* don't do anything, just let the compiler check the arguments; */
+-}
+-
+-int simple_attr_open(struct inode *inode, struct file *file,
+- int (*get)(void *, u64 *), int (*set)(void *, u64),
+- const char *fmt);
+-int simple_attr_release(struct inode *inode, struct file *file);
+-ssize_t simple_attr_read(struct file *file, char __user *buf,
+- size_t len, loff_t *ppos);
+-ssize_t simple_attr_write(struct file *file, const char __user *buf,
+- size_t len, loff_t *ppos);
+-
+-
+-#ifdef CONFIG_SECURITY
+-static inline char *alloc_secdata(void)
+-{
+- return (char *)get_zeroed_page(GFP_KERNEL);
+-}
+-
+-static inline void free_secdata(void *secdata)
+-{
+- free_page((unsigned long)secdata);
+-}
+-#else
+-static inline char *alloc_secdata(void)
+-{
+- return (char *)1;
+-}
+-
+-static inline void free_secdata(void *secdata)
+-{ }
+-#endif /* CONFIG_SECURITY */
+-
+-struct ctl_table;
+-int proc_nr_files(struct ctl_table *table, int write, struct file *filp,
+- void __user *buffer, size_t *lenp, loff_t *ppos);
+-
+-int get_filesystem_list(char * buf);
+-
+-#endif /* __KERNEL__ */
+-#endif /* _LINUX_FS_H */
+diff -Nurb linux-2.6.27-720/include/linux/highmem.h linux-2.6.27-710/include/linux/highmem.h
+--- linux-2.6.27-720/include/linux/highmem.h 2009-05-04 12:18:34.000000000 -0400
++++ linux-2.6.27-710/include/linux/highmem.h 2008-10-09 18:13:53.000000000 -0400
+@@ -165,8 +165,8 @@
+ {
+ char *vfrom, *vto;
+
+- vfrom = (char*)kmap_atomic(from, KM_USER0);
+- vto = (char*)kmap_atomic(to, KM_USER1);
++ vfrom = kmap_atomic(from, KM_USER0);
++ vto = kmap_atomic(to, KM_USER1);
+ copy_user_page(vto, vfrom, vaddr, to);
+ kunmap_atomic(vfrom, KM_USER0);
+ kunmap_atomic(vto, KM_USER1);
+@@ -178,8 +178,8 @@
+ {
+ char *vfrom, *vto;
+
+- vfrom = (char*)kmap_atomic(from, KM_USER0);
+- vto = (char*)kmap_atomic(to, KM_USER1);
++ vfrom = kmap_atomic(from, KM_USER0);
++ vto = kmap_atomic(to, KM_USER1);
+ copy_page(vto, vfrom);
+ kunmap_atomic(vfrom, KM_USER0);
+ kunmap_atomic(vto, KM_USER1);
+diff -Nurb linux-2.6.27-720/include/linux/hrtimer.h linux-2.6.27-710/include/linux/hrtimer.h
+--- linux-2.6.27-720/include/linux/hrtimer.h 2009-05-04 12:18:34.000000000 -0400
++++ linux-2.6.27-710/include/linux/hrtimer.h 2008-10-09 18:13:53.000000000 -0400
+@@ -384,7 +384,7 @@
+ static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
+ {
+ timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
+- (void *) timer->function, timer->start_comm, 0);
++ timer->function, timer->start_comm, 0);
+ }
+
+ extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer,
+diff -Nurb linux-2.6.27-720/include/linux/inetdevice.h linux-2.6.27-710/include/linux/inetdevice.h
+--- linux-2.6.27-720/include/linux/inetdevice.h 2009-05-04 12:18:34.000000000 -0400
++++ linux-2.6.27-710/include/linux/inetdevice.h 2008-10-09 18:13:53.000000000 -0400
+@@ -166,7 +166,7 @@
+
+ static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev)
+ {
+- struct in_device *in_dev = (struct in_device *) dev->ip_ptr;
++ struct in_device *in_dev = dev->ip_ptr;
+ if (in_dev)
+ in_dev = rcu_dereference(in_dev);
+ return in_dev;
+diff -Nurb linux-2.6.27-720/include/linux/jhash.h linux-2.6.27-710/include/linux/jhash.h
+--- linux-2.6.27-720/include/linux/jhash.h 2009-05-04 12:18:34.000000000 -0400
++++ linux-2.6.27-710/include/linux/jhash.h 2008-10-09 18:13:53.000000000 -0400
+@@ -44,7 +44,7 @@
+ static inline u32 jhash(const void *key, u32 length, u32 initval)
+ {
+ u32 a, b, c, len;
+- const u8 *k = (const u8 *) key;
++ const u8 *k = key;
+
+ len = length;
+ a = b = JHASH_GOLDEN_RATIO;
+diff -Nurb linux-2.6.27-720/include/linux/kernel.h linux-2.6.27-710/include/linux/kernel.h
+--- linux-2.6.27-720/include/linux/kernel.h 2009-05-04 12:18:34.000000000 -0400
++++ linux-2.6.27-710/include/linux/kernel.h 2008-10-09 18:13:53.000000000 -0400
+@@ -213,7 +213,7 @@
+ { return false; }
+ #endif
+
+-asmlinkage void __attribute__((format(printf, 1, 2)))
++extern void asmlinkage __attribute__((format(printf, 1, 2)))
+ early_printk(const char *fmt, ...);
+
+ unsigned long int_sqrt(unsigned long);
+diff -Nurb linux-2.6.27-720/include/linux/ktime.h linux-2.6.27-710/include/linux/ktime.h
+--- linux-2.6.27-720/include/linux/ktime.h 2009-05-04 12:18:34.000000000 -0400
++++ linux-2.6.27-710/include/linux/ktime.h 2008-10-09 18:13:53.000000000 -0400
+@@ -71,12 +71,6 @@
+
+ #if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)
+
+-#ifdef __cplusplus
+-# define KTIME_TV64(__s) ({ ktime_t __kt; __kt.tv64 = (__s); __kt; })
+-#else
+-# define KTIME_TV64(__s) ((ktime_t) { .tv64 = (__s) })
+-#endif
+-
+ /**
+ * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
+ * @secs: seconds to set
+@@ -88,37 +82,32 @@
+ {
+ #if (BITS_PER_LONG == 64)
+ if (unlikely(secs >= KTIME_SEC_MAX))
+- return KTIME_TV64(KTIME_MAX);
++ return (ktime_t){ .tv64 = KTIME_MAX };
+ #endif
+- return KTIME_TV64((s64)secs * NSEC_PER_SEC + (s64)nsecs);
++ return (ktime_t) { .tv64 = (s64)secs * NSEC_PER_SEC + (s64)nsecs };
+ }
+
+ /* Subtract two ktime_t variables. rem = lhs -rhs: */
+ #define ktime_sub(lhs, rhs) \
+- KTIME_TV64((lhs).tv64 - (rhs).tv64)
++ ({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; })
+
+ /* Add two ktime_t variables. res = lhs + rhs: */
+ #define ktime_add(lhs, rhs) \
+- KTIME_TV64((lhs).tv64 + (rhs).tv64)
++ ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
+
+ /*
+ * Add a ktime_t variable and a scalar nanosecond value.
+ * res = kt + nsval:
+ */
+ #define ktime_add_ns(kt, nsval) \
+- KTIME_TV64((kt).tv64 + (nsval))
++ ({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; })
+
+ /*
+ * Subtract a scalar nanosecod from a ktime_t variable
+ * res = kt - nsval:
+ */
+-#ifdef __cplusplus
+-#define ktime_sub_ns(kt, nsval) \
+- ({ktime_t duh; duh.tv64 = (kt).tv64 - (nsval); duh; })
+-#else
+ #define ktime_sub_ns(kt, nsval) \
+ ({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; })
+-#endif
+
+ /* convert a timespec to ktime_t format: */
+ static inline ktime_t timespec_to_ktime(struct timespec ts)
+@@ -143,18 +132,6 @@
+
+ #else
+
+-#ifdef __cplusplus
+-# define KTIME_TV64(__s) ({ ktime_t __kt; __kt.tv64 = (__s); __kt; })
+-# define KTIME_SEC_NSEC(__sec, __nsec) ({ ktime_t __kt; __kt.tv.sec = (__sec); __kt.tv.nsec = (__nsec); __kt; })
+-# define TIMEVAL_SEC_USEC(__sec, __usec) ({ struct timeval __tv; __tv.tv_sec = (__sec); __tv.tv_usec = (__usec); __tv; })
+-# define TIMESPEC_SEC_NSEC(__sec, __nsec) ({ struct timespec __ts; __ts.tv_sec = (__sec); __ts.tv_nsec = (__nsec); __ts; })
+-#else
+-# define KTIME_TV64(__s) ((ktime_t) { .tv64 = (__s) })
+-# define KTIME_SEC_NSEC(__sec, __nsec) ((ktime_t) { .tv = { .sec = (__sec), .nsec = (__nsec) } })
+-# define TIMEVAL_SEC_USEC(__sec, __usec) ((struct timeval) { .tv_sec = (__sec), .tv_usec = (__usec) })
+-# define TIMESPEC_SEC_NSEC(__sec, __nsec) ((struct timespec) { .tv_sec = (__sec), .tv_nsec = (__nsec) })
+-#endif
+-
+ /*
+ * Helper macros/inlines to get the ktime_t math right in the timespec
+ * representation. The macros are sometimes ugly - their actual use is
+@@ -173,7 +150,7 @@
+ /* Set a ktime_t variable to a value in sec/nsec representation: */
+ static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
+ {
+- return KTIME_SEC_NSEC(secs, nsecs);
++ return (ktime_t) { .tv = { .sec = secs, .nsec = nsecs } };
+ }
+
+ /**
+@@ -246,7 +223,8 @@
+ */
+ static inline ktime_t timespec_to_ktime(const struct timespec ts)
+ {
+- return KTIME_SEC_NSEC((s32)ts.tv_sec, (s32)ts.tv_nsec);
++ return (ktime_t) { .tv = { .sec = (s32)ts.tv_sec,
++ .nsec = (s32)ts.tv_nsec } };
+ }
+
+ /**
+@@ -257,7 +235,8 @@
+ */
+ static inline ktime_t timeval_to_ktime(const struct timeval tv)
+ {
+- return KTIME_SEC_NSEC((s32)tv.tv_sec, (s32)tv.tv_usec * 1000);
++ return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec,
++ .nsec = (s32)tv.tv_usec * 1000 } };
+ }
+
+ /**
+@@ -268,7 +247,8 @@
+ */
+ static inline struct timespec ktime_to_timespec(const ktime_t kt)
+ {
+- return TIMESPEC_SEC_NSEC((time_t) kt.tv.sec, (long) kt.tv.nsec);
++ return (struct timespec) { .tv_sec = (time_t) kt.tv.sec,
++ .tv_nsec = (long) kt.tv.nsec };
+ }
+
+ /**
+@@ -279,8 +259,9 @@
+ */
+ static inline struct timeval ktime_to_timeval(const ktime_t kt)
+ {
+- return TIMEVAL_SEC_USEC((time_t) kt.tv.sec,
+- (suseconds_t) (kt.tv.nsec / NSEC_PER_USEC));
++ return (struct timeval) {
++ .tv_sec = (time_t) kt.tv.sec,
++ .tv_usec = (suseconds_t) (kt.tv.nsec / NSEC_PER_USEC) };
+ }
+
+ /**
+@@ -348,7 +329,7 @@
+
+ static inline ktime_t ns_to_ktime(u64 ns)
+ {
+- static const ktime_t ktime_zero = ({ktime_t duh; duh.tv64 = 0;duh;});
++ static const ktime_t ktime_zero = { .tv64 = 0 };
+ return ktime_add_ns(ktime_zero, ns);
+ }
+
+diff -Nurb linux-2.6.27-720/include/linux/linkage.h linux-2.6.27-710/include/linux/linkage.h
+--- linux-2.6.27-720/include/linux/linkage.h 2009-05-04 12:18:34.000000000 -0400
++++ linux-2.6.27-710/include/linux/linkage.h 2008-10-09 18:13:53.000000000 -0400
+@@ -11,13 +11,6 @@
+ #else
+ #define CPP_ASMLINKAGE
+ #endif
+-#ifndef extern_asmlinkage
+-# ifdef __cplusplus
+-# define extern_asmlinkage asmlinkage
+-# else
+-# define extern_asmlinkage extern asmlinkage
+-# endif
+-#endif
+
+ #ifndef asmlinkage
+ #define asmlinkage CPP_ASMLINKAGE
+diff -Nurb linux-2.6.27-720/include/linux/list.h linux-2.6.27-710/include/linux/list.h
+--- linux-2.6.27-720/include/linux/list.h 2009-05-04 12:18:34.000000000 -0400
++++ linux-2.6.27-710/include/linux/list.h 2008-10-09 18:13:53.000000000 -0400
+@@ -20,11 +20,7 @@
+ struct list_head *next, *prev;
+ };
+
+-#ifdef __cplusplus
+-#define LIST_HEAD_INIT(name) ({struct list_head duh;duh.next=&(name);duh.prev=&(name);duh;})
+-#else
+ #define LIST_HEAD_INIT(name) { &(name), &(name) }
+-#endif
+
+ #define LIST_HEAD(name) \
+ struct list_head name = LIST_HEAD_INIT(name)
+@@ -107,8 +103,8 @@
+ static inline void list_del(struct list_head *entry)
+ {
+ __list_del(entry->prev, entry->next);
+- entry->next = (struct list_head*)(LIST_POISON1);
+- entry->prev = (struct list_head*)(LIST_POISON2);
++ entry->next = LIST_POISON1;
++ entry->prev = LIST_POISON2;
+ }
+ #else
+ extern void list_del(struct list_head *entry);
+@@ -580,8 +576,8 @@
+ static inline void hlist_del(struct hlist_node *n)
+ {
+ __hlist_del(n);
+- n->next = (struct hlist_node*)(LIST_POISON1);
+- n->pprev = (struct hlist_node**)(LIST_POISON2);
++ n->next = LIST_POISON1;
++ n->pprev = LIST_POISON2;
+ }
+
+ static inline void hlist_del_init(struct hlist_node *n)
+diff -Nurb linux-2.6.27-720/include/linux/mempolicy.h linux-2.6.27-710/include/linux/mempolicy.h
+--- linux-2.6.27-720/include/linux/mempolicy.h 2009-05-04 12:18:34.000000000 -0400
++++ linux-2.6.27-710/include/linux/mempolicy.h 2008-10-09 18:13:53.000000000 -0400
+@@ -241,7 +241,7 @@
+
+ #else
+
+-EMPTY_STRUCT_DECL(mempolicy);
++struct mempolicy {};
+
+ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
+ {
+@@ -271,7 +271,7 @@
+ return NULL;
+ }
+
+-EMPTY_STRUCT_DECL(shared_policy);
++struct shared_policy {};
+
+ static inline int mpol_set_shared_policy(struct shared_policy *info,
+ struct vm_area_struct *vma,
+diff -Nurb linux-2.6.27-720/include/linux/mm.h linux-2.6.27-710/include/linux/mm.h
+--- linux-2.6.27-720/include/linux/mm.h 2009-05-04 12:18:34.000000000 -0400
++++ linux-2.6.27-710/include/linux/mm.h 2009-05-04 12:15:01.000000000 -0400
+@@ -326,7 +326,7 @@
+ static inline void set_compound_page_dtor(struct page *page,
+ compound_page_dtor *dtor)
+ {
+- page[1].lru.next = (struct list_head *)dtor;
++ page[1].lru.next = (void *)dtor;
+ }
+
+ static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
+@@ -343,7 +343,7 @@
+
+ static inline void set_compound_order(struct page *page, unsigned long order)
+ {
+- page[1].lru.prev = (struct list_head *)order;
++ page[1].lru.prev = (void *)order;
+ }
+
+ /*
+@@ -493,7 +493,7 @@
+
+ static inline enum zone_type page_zonenum(struct page *page)
+ {
+- return (enum zone_type) ((page->flags >> ZONES_PGSHIFT) & ZONES_MASK);
++ return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+ }
+
+ /*
+diff -Nurb linux-2.6.27-720/include/linux/mm.h.orig linux-2.6.27-710/include/linux/mm.h.orig
+--- linux-2.6.27-720/include/linux/mm.h.orig 2009-05-04 12:15:01.000000000 -0400
++++ linux-2.6.27-710/include/linux/mm.h.orig 1969-12-31 19:00:00.000000000 -0500
+@@ -1,1289 +0,0 @@
+-#ifndef _LINUX_MM_H
+-#define _LINUX_MM_H
+-
+-#include <linux/errno.h>
+-
+-#ifdef __KERNEL__
+-
+-#include <linux/gfp.h>
+-#include <linux/list.h>
+-#include <linux/mmzone.h>
+-#include <linux/rbtree.h>
+-#include <linux/prio_tree.h>
+-#include <linux/debug_locks.h>
+-#include <linux/mm_types.h>
+-
+-struct mempolicy;
+-struct anon_vma;
+-struct file_ra_state;
+-struct user_struct;
+-struct writeback_control;
+-
+-#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
+-extern unsigned long max_mapnr;
+-#endif
+-
+-extern unsigned long num_physpages;
+-extern void * high_memory;
+-extern int page_cluster;
+-
+-#ifdef CONFIG_SYSCTL
+-extern int sysctl_legacy_va_layout;
+-#else
+-#define sysctl_legacy_va_layout 0
+-#endif
+-
+-extern unsigned long mmap_min_addr;
+-
+-#include <asm/page.h>
+-#include <asm/pgtable.h>
+-#include <asm/processor.h>
+-
+-#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
+-
+-/* to align the pointer to the (next) page boundary */
+-#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+-
+-/*
+- * Linux kernel virtual memory manager primitives.
+- * The idea being to have a "virtual" mm in the same way
+- * we have a virtual fs - giving a cleaner interface to the
+- * mm details, and allowing different kinds of memory mappings
+- * (from shared memory to executable loading to arbitrary
+- * mmap() functions).
+- */
+-
+-extern struct kmem_cache *vm_area_cachep;
+-
+-/*
+- * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
+- * disabled, then there's a single shared list of VMAs maintained by the
+- * system, and mm's subscribe to these individually
+- */
+-struct vm_list_struct {
+- struct vm_list_struct *next;
+- struct vm_area_struct *vma;
+-};
+-
+-#ifndef CONFIG_MMU
+-extern struct rb_root nommu_vma_tree;
+-extern struct rw_semaphore nommu_vma_sem;
+-
+-extern unsigned int kobjsize(const void *objp);
+-#endif
+-
+-/*
+- * vm_flags in vm_area_struct, see mm_types.h.
+- */
+-#define VM_READ 0x00000001 /* currently active flags */
+-#define VM_WRITE 0x00000002
+-#define VM_EXEC 0x00000004
+-#define VM_SHARED 0x00000008
+-
+-/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
+-#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
+-#define VM_MAYWRITE 0x00000020
+-#define VM_MAYEXEC 0x00000040
+-#define VM_MAYSHARE 0x00000080
+-
+-#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+-#define VM_GROWSUP 0x00000200
+-#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
+-#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
+-
+-#define VM_EXECUTABLE 0x00001000
+-#define VM_LOCKED 0x00002000
+-#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
+-
+- /* Used by sys_madvise() */
+-#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
+-#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
+-
+-#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
+-#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
+-#define VM_RESERVED 0x00080000 /* Count as reserved_vm like IO */
+-#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
+-#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
+-#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
+-#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
+-#define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */
+-#define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */
+-#define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */
+-
+-#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
+-#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
+-#define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
+-
+-#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
+-#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+-#endif
+-
+-#ifdef CONFIG_STACK_GROWSUP
+-#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+-#else
+-#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+-#endif
+-
+-#define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ)
+-#define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK
+-#define VM_NormalReadHint(v) (!((v)->vm_flags & VM_READHINTMASK))
+-#define VM_SequentialReadHint(v) ((v)->vm_flags & VM_SEQ_READ)
+-#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ)
+-
+-/*
+- * mapping from the currently active vm_flags protection bits (the
+- * low four bits) to a page protection mask..
+- */
+-extern pgprot_t protection_map[16];
+-
+-#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
+-#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
+-
+-
+-/*
+- * vm_fault is filled by the the pagefault handler and passed to the vma's
+- * ->fault function. The vma's ->fault is responsible for returning a bitmask
+- * of VM_FAULT_xxx flags that give details about how the fault was handled.
+- *
+- * pgoff should be used in favour of virtual_address, if possible. If pgoff
+- * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
+- * mapping support.
+- */
+-struct vm_fault {
+- unsigned int flags; /* FAULT_FLAG_xxx flags */
+- pgoff_t pgoff; /* Logical page offset based on vma */
+- void __user *virtual_address; /* Faulting virtual address */
+-
+- struct page *page; /* ->fault handlers should return a
+- * page here, unless VM_FAULT_NOPAGE
+- * is set (which is also implied by
+- * VM_FAULT_ERROR).
+- */
+-};
+-
+-/*
+- * These are the virtual MM functions - opening of an area, closing and
+- * unmapping it (needed to keep files on disk up-to-date etc), pointer
+- * to the functions called when a no-page or a wp-page exception occurs.
+- */
+-struct vm_operations_struct {
+- void (*open)(struct vm_area_struct * area);
+- void (*close)(struct vm_area_struct * area);
+- int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+-
+- /* notification that a previously read-only page is about to become
+- * writable, if an error is returned it will cause a SIGBUS */
+- int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
+-
+- /* called by access_process_vm when get_user_pages() fails, typically
+- * for use by special VMAs that can switch between memory and hardware
+- */
+- int (*access)(struct vm_area_struct *vma, unsigned long addr,
+- void *buf, int len, int write);
+-#ifdef CONFIG_NUMA
+- /*
+- * set_policy() op must add a reference to any non-NULL @new mempolicy
+- * to hold the policy upon return. Caller should pass NULL @new to
+- * remove a policy and fall back to surrounding context--i.e. do not
+- * install a MPOL_DEFAULT policy, nor the task or system default
+- * mempolicy.
+- */
+- int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+-
+- /*
+- * get_policy() op must add reference [mpol_get()] to any policy at
+- * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
+- * in mm/mempolicy.c will do this automatically.
+- * get_policy() must NOT add a ref if the policy at (vma,addr) is not
+- * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
+- * If no [shared/vma] mempolicy exists at the addr, get_policy() op
+- * must return NULL--i.e., do not "fallback" to task or system default
+- * policy.
+- */
+- struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
+- unsigned long addr);
+- int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
+- const nodemask_t *to, unsigned long flags);
+-#endif
+-};
+-
+-struct mmu_gather;
+-struct inode;
+-
+-#define page_private(page) ((page)->private)
+-#define set_page_private(page, v) ((page)->private = (v))
+-
+-/*
+- * FIXME: take this include out, include page-flags.h in
+- * files which need it (119 of them)
+- */
+-#include <linux/page-flags.h>
+-
+-#ifdef CONFIG_DEBUG_VM
+-#define VM_BUG_ON(cond) BUG_ON(cond)
+-#else
+-#define VM_BUG_ON(condition) do { } while(0)
+-#endif
+-
+-/*
+- * Methods to modify the page usage count.
+- *
+- * What counts for a page usage:
+- * - cache mapping (page->mapping)
+- * - private data (page->private)
+- * - page mapped in a task's page tables, each mapping
+- * is counted separately
+- *
+- * Also, many kernel routines increase the page count before a critical
+- * routine so they can be sure the page doesn't go away from under them.
+- */
+-
+-/*
+- * Drop a ref, return true if the refcount fell to zero (the page has no users)
+- */
+-static inline int put_page_testzero(struct page *page)
+-{
+- VM_BUG_ON(atomic_read(&page->_count) == 0);
+- return atomic_dec_and_test(&page->_count);
+-}
+-
+-/*
+- * Try to grab a ref unless the page has a refcount of zero, return false if
+- * that is the case.
+- */
+-static inline int get_page_unless_zero(struct page *page)
+-{
+- return atomic_inc_not_zero(&page->_count);
+-}
+-
+-/* Support for virtually mapped pages */
+-struct page *vmalloc_to_page(const void *addr);
+-unsigned long vmalloc_to_pfn(const void *addr);
+-
+-/*
+- * Determine if an address is within the vmalloc range
+- *
+- * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
+- * is no special casing required.
+- */
+-static inline int is_vmalloc_addr(const void *x)
+-{
+-#ifdef CONFIG_MMU
+- unsigned long addr = (unsigned long)x;
+-
+- return addr >= VMALLOC_START && addr < VMALLOC_END;
+-#else
+- return 0;
+-#endif
+-}
+-
+-static inline struct page *compound_head(struct page *page)
+-{
+- if (unlikely(PageTail(page)))
+- return page->first_page;
+- return page;
+-}
+-
+-static inline int page_count(struct page *page)
+-{
+- return atomic_read(&compound_head(page)->_count);
+-}
+-
+-static inline void get_page(struct page *page)
+-{
+- page = compound_head(page);
+- VM_BUG_ON(atomic_read(&page->_count) == 0);
+- atomic_inc(&page->_count);
+-}
+-
+-static inline struct page *virt_to_head_page(const void *x)
+-{
+- struct page *page = virt_to_page(x);
+- return compound_head(page);
+-}
+-
+-/*
+- * Setup the page count before being freed into the page allocator for
+- * the first time (boot or memory hotplug)
+- */
+-static inline void init_page_count(struct page *page)
+-{
+- atomic_set(&page->_count, 1);
+-}
+-
+-void put_page(struct page *page);
+-void put_pages_list(struct list_head *pages);
+-
+-void split_page(struct page *page, unsigned int order);
+-
+-/*
+- * Compound pages have a destructor function. Provide a
+- * prototype for that function and accessor functions.
+- * These are _only_ valid on the head of a PG_compound page.
+- */
+-typedef void compound_page_dtor(struct page *);
+-
+-static inline void set_compound_page_dtor(struct page *page,
+- compound_page_dtor *dtor)
+-{
+- page[1].lru.next = (void *)dtor;
+-}
+-
+-static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
+-{
+- return (compound_page_dtor *)page[1].lru.next;
+-}
+-
+-static inline int compound_order(struct page *page)
+-{
+- if (!PageHead(page))
+- return 0;
+- return (unsigned long)page[1].lru.prev;
+-}
+-
+-static inline void set_compound_order(struct page *page, unsigned long order)
+-{
+- page[1].lru.prev = (void *)order;
+-}
+-
+-/*
+- * Multiple processes may "see" the same page. E.g. for untouched
+- * mappings of /dev/null, all processes see the same page full of
+- * zeroes, and text pages of executables and shared libraries have
+- * only one copy in memory, at most, normally.
+- *
+- * For the non-reserved pages, page_count(page) denotes a reference count.
+- * page_count() == 0 means the page is free. page->lru is then used for
+- * freelist management in the buddy allocator.
+- * page_count() > 0 means the page has been allocated.
+- *
+- * Pages are allocated by the slab allocator in order to provide memory
+- * to kmalloc and kmem_cache_alloc. In this case, the management of the
+- * page, and the fields in 'struct page' are the responsibility of mm/slab.c
+- * unless a particular usage is carefully commented. (the responsibility of
+- * freeing the kmalloc memory is the caller's, of course).
+- *
+- * A page may be used by anyone else who does a __get_free_page().
+- * In this case, page_count still tracks the references, and should only
+- * be used through the normal accessor functions. The top bits of page->flags
+- * and page->virtual store page management information, but all other fields
+- * are unused and could be used privately, carefully. The management of this
+- * page is the responsibility of the one who allocated it, and those who have
+- * subsequently been given references to it.
+- *
+- * The other pages (we may call them "pagecache pages") are completely
+- * managed by the Linux memory manager: I/O, buffers, swapping etc.
+- * The following discussion applies only to them.
+- *
+- * A pagecache page contains an opaque `private' member, which belongs to the
+- * page's address_space. Usually, this is the address of a circular list of
+- * the page's disk buffers. PG_private must be set to tell the VM to call
+- * into the filesystem to release these pages.
+- *
+- * A page may belong to an inode's memory mapping. In this case, page->mapping
+- * is the pointer to the inode, and page->index is the file offset of the page,
+- * in units of PAGE_CACHE_SIZE.
+- *
+- * If pagecache pages are not associated with an inode, they are said to be
+- * anonymous pages. These may become associated with the swapcache, and in that
+- * case PG_swapcache is set, and page->private is an offset into the swapcache.
+- *
+- * In either case (swapcache or inode backed), the pagecache itself holds one
+- * reference to the page. Setting PG_private should also increment the
+- * refcount. The each user mapping also has a reference to the page.
+- *
+- * The pagecache pages are stored in a per-mapping radix tree, which is
+- * rooted at mapping->page_tree, and indexed by offset.
+- * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
+- * lists, we instead now tag pages as dirty/writeback in the radix tree.
+- *
+- * All pagecache pages may be subject to I/O:
+- * - inode pages may need to be read from disk,
+- * - inode pages which have been modified and are MAP_SHARED may need
+- * to be written back to the inode on disk,
+- * - anonymous pages (including MAP_PRIVATE file mappings) which have been
+- * modified may need to be swapped out to swap space and (later) to be read
+- * back into memory.
+- */
+-
+-/*
+- * The zone field is never updated after free_area_init_core()
+- * sets it, so none of the operations on it need to be atomic.
+- */
+-
+-
+-/*
+- * page->flags layout:
+- *
+- * There are three possibilities for how page->flags get
+- * laid out. The first is for the normal case, without
+- * sparsemem. The second is for sparsemem when there is
+- * plenty of space for node and section. The last is when
+- * we have run out of space and have to fall back to an
+- * alternate (slower) way of determining the node.
+- *
+- * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS |
+- * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
+- * classic sparse no space for node: | SECTION | ZONE | ... | FLAGS |
+- */
+-#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+-#define SECTIONS_WIDTH SECTIONS_SHIFT
+-#else
+-#define SECTIONS_WIDTH 0
+-#endif
+-
+-#define ZONES_WIDTH ZONES_SHIFT
+-
+-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+-#define NODES_WIDTH NODES_SHIFT
+-#else
+-#ifdef CONFIG_SPARSEMEM_VMEMMAP
+-#error "Vmemmap: No space for nodes field in page flags"
+-#endif
+-#define NODES_WIDTH 0
+-#endif
+-
+-/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
+-#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
+-#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
+-#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
+-
+-/*
+- * We are going to use the flags for the page to node mapping if its in
+- * there. This includes the case where there is no node, so it is implicit.
+- */
+-#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
+-#define NODE_NOT_IN_PAGE_FLAGS
+-#endif
+-
+-#ifndef PFN_SECTION_SHIFT
+-#define PFN_SECTION_SHIFT 0
+-#endif
+-
+-/*
+- * Define the bit shifts to access each section. For non-existant
+- * sections we define the shift as 0; that plus a 0 mask ensures
+- * the compiler will optimise away reference to them.
+- */
+-#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
+-#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
+-#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
+-
+-/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allcator */
+-#ifdef NODE_NOT_IN_PAGEFLAGS
+-#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
+-#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
+- SECTIONS_PGOFF : ZONES_PGOFF)
+-#else
+-#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
+-#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
+- NODES_PGOFF : ZONES_PGOFF)
+-#endif
+-
+-#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
+-
+-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+-#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+-#endif
+-
+-#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
+-#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
+-#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
+-#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
+-
+-static inline enum zone_type page_zonenum(struct page *page)
+-{
+- return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+-}
+-
+-/*
+- * The identification function is only used by the buddy allocator for
+- * determining if two pages could be buddies. We are not really
+- * identifying a zone since we could be using a the section number
+- * id if we have not node id available in page flags.
+- * We guarantee only that it will return the same value for two
+- * combinable pages in a zone.
+- */
+-static inline int page_zone_id(struct page *page)
+-{
+- return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
+-}
+-
+-static inline int zone_to_nid(struct zone *zone)
+-{
+-#ifdef CONFIG_NUMA
+- return zone->node;
+-#else
+- return 0;
+-#endif
+-}
+-
+-#ifdef NODE_NOT_IN_PAGE_FLAGS
+-extern int page_to_nid(struct page *page);
+-#else
+-static inline int page_to_nid(struct page *page)
+-{
+- return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
+-}
+-#endif
+-
+-static inline struct zone *page_zone(struct page *page)
+-{
+- return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
+-}
+-
+-#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+-static inline unsigned long page_to_section(struct page *page)
+-{
+- return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
+-}
+-#endif
+-
+-static inline void set_page_zone(struct page *page, enum zone_type zone)
+-{
+- page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
+- page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
+-}
+-
+-static inline void set_page_node(struct page *page, unsigned long node)
+-{
+- page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
+- page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
+-}
+-
+-static inline void set_page_section(struct page *page, unsigned long section)
+-{
+- page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
+- page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
+-}
+-
+-static inline void set_page_links(struct page *page, enum zone_type zone,
+- unsigned long node, unsigned long pfn)
+-{
+- set_page_zone(page, zone);
+- set_page_node(page, node);
+- set_page_section(page, pfn_to_section_nr(pfn));
+-}
+-
+-/*
+- * If a hint addr is less than mmap_min_addr change hint to be as
+- * low as possible but still greater than mmap_min_addr
+- */
+-static inline unsigned long round_hint_to_min(unsigned long hint)
+-{
+-#ifdef CONFIG_SECURITY
+- hint &= PAGE_MASK;
+- if (((void *)hint != NULL) &&
+- (hint < mmap_min_addr))
+- return PAGE_ALIGN(mmap_min_addr);
+-#endif
+- return hint;
+-}
+-
+-/*
+- * Some inline functions in vmstat.h depend on page_zone()
+- */
+-#include <linux/vmstat.h>
+-
+-static __always_inline void *lowmem_page_address(struct page *page)
+-{
+- return __va(page_to_pfn(page) << PAGE_SHIFT);
+-}
+-
+-#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
+-#define HASHED_PAGE_VIRTUAL
+-#endif
+-
+-#if defined(WANT_PAGE_VIRTUAL)
+-#define page_address(page) ((page)->virtual)
+-#define set_page_address(page, address) \
+- do { \
+- (page)->virtual = (address); \
+- } while(0)
+-#define page_address_init() do { } while(0)
+-#endif
+-
+-#if defined(HASHED_PAGE_VIRTUAL)
+-void *page_address(struct page *page);
+-void set_page_address(struct page *page, void *virtual);
+-void page_address_init(void);
+-#endif
+-
+-#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
+-#define page_address(page) lowmem_page_address(page)
+-#define set_page_address(page, address) do { } while(0)
+-#define page_address_init() do { } while(0)
+-#endif
+-
+-/*
+- * On an anonymous page mapped into a user virtual memory area,
+- * page->mapping points to its anon_vma, not to a struct address_space;
+- * with the PAGE_MAPPING_ANON bit set to distinguish it.
+- *
+- * Please note that, confusingly, "page_mapping" refers to the inode
+- * address_space which maps the page from disk; whereas "page_mapped"
+- * refers to user virtual address space into which the page is mapped.
+- */
+-#define PAGE_MAPPING_ANON 1
+-
+-extern struct address_space swapper_space;
+-static inline struct address_space *page_mapping(struct page *page)
+-{
+- struct address_space *mapping = page->mapping;
+-
+- VM_BUG_ON(PageSlab(page));
+-#ifdef CONFIG_SWAP
+- if (unlikely(PageSwapCache(page)))
+- mapping = &swapper_space;
+- else
+-#endif
+- if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+- mapping = NULL;
+- return mapping;
+-}
+-
+-static inline int PageAnon(struct page *page)
+-{
+- return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+-}
+-
+-/*
+- * Return the pagecache index of the passed page. Regular pagecache pages
+- * use ->index whereas swapcache pages use ->private
+- */
+-static inline pgoff_t page_index(struct page *page)
+-{
+- if (unlikely(PageSwapCache(page)))
+- return page_private(page);
+- return page->index;
+-}
+-
+-/*
+- * The atomic page->_mapcount, like _count, starts from -1:
+- * so that transitions both from it and to it can be tracked,
+- * using atomic_inc_and_test and atomic_add_negative(-1).
+- */
+-static inline void reset_page_mapcount(struct page *page)
+-{
+- atomic_set(&(page)->_mapcount, -1);
+-}
+-
+-static inline int page_mapcount(struct page *page)
+-{
+- return atomic_read(&(page)->_mapcount) + 1;
+-}
+-
+-/*
+- * Return true if this page is mapped into pagetables.
+- */
+-static inline int page_mapped(struct page *page)
+-{
+- return atomic_read(&(page)->_mapcount) >= 0;
+-}
+-
+-/*
+- * Different kinds of faults, as returned by handle_mm_fault().
+- * Used to decide whether a process gets delivered SIGBUS or
+- * just gets major/minor fault counters bumped up.
+- */
+-
+-#define VM_FAULT_MINOR 0 /* For backwards compat. Remove me quickly. */
+-
+-#define VM_FAULT_OOM 0x0001
+-#define VM_FAULT_SIGBUS 0x0002
+-#define VM_FAULT_MAJOR 0x0004
+-#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
+-
+-#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
+-#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
+-
+-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS)
+-
+-#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+-
+-extern void show_free_areas(void);
+-
+-#ifdef CONFIG_SHMEM
+-int shmem_lock(struct file *file, int lock, struct user_struct *user);
+-#else
+-static inline int shmem_lock(struct file *file, int lock,
+- struct user_struct *user)
+-{
+- return 0;
+-}
+-#endif
+-struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
+-
+-int shmem_zero_setup(struct vm_area_struct *);
+-
+-#ifndef CONFIG_MMU
+-extern unsigned long shmem_get_unmapped_area(struct file *file,
+- unsigned long addr,
+- unsigned long len,
+- unsigned long pgoff,
+- unsigned long flags);
+-#endif
+-
+-extern int can_do_mlock(void);
+-extern int user_shm_lock(size_t, struct user_struct *);
+-extern void user_shm_unlock(size_t, struct user_struct *);
+-
+-/*
+- * Parameter block passed down to zap_pte_range in exceptional cases.
+- */
+-struct zap_details {
+- struct vm_area_struct *nonlinear_vma; /* Check page->index if set */
+- struct address_space *check_mapping; /* Check page->mapping if set */
+- pgoff_t first_index; /* Lowest page->index to unmap */
+- pgoff_t last_index; /* Highest page->index to unmap */
+- spinlock_t *i_mmap_lock; /* For unmap_mapping_range: */
+- unsigned long truncate_count; /* Compare vm_truncate_count */
+-};
+-
+-struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+- pte_t pte);
+-
+-int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+- unsigned long size);
+-unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
+- unsigned long size, struct zap_details *);
+-unsigned long unmap_vmas(struct mmu_gather **tlb,
+- struct vm_area_struct *start_vma, unsigned long start_addr,
+- unsigned long end_addr, unsigned long *nr_accounted,
+- struct zap_details *);
+-
+-/**
+- * mm_walk - callbacks for walk_page_range
+- * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
+- * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
+- * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
+- * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
+- * @pte_hole: if set, called for each hole at all levels
+- *
+- * (see walk_page_range for more details)
+- */
+-struct mm_walk {
+- int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
+- int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
+- int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
+- int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
+- int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+- struct mm_struct *mm;
+- void *private;
+-};
+-
+-int walk_page_range(unsigned long addr, unsigned long end,
+- struct mm_walk *walk);
+-void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
+- unsigned long end, unsigned long floor, unsigned long ceiling);
+-int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
+- struct vm_area_struct *vma);
+-void unmap_mapping_range(struct address_space *mapping,
+- loff_t const holebegin, loff_t const holelen, int even_cows);
+-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+- void *buf, int len, int write);
+-
+-static inline void unmap_shared_mapping_range(struct address_space *mapping,
+- loff_t const holebegin, loff_t const holelen)
+-{
+- unmap_mapping_range(mapping, holebegin, holelen, 0);
+-}
+-
+-extern int vmtruncate(struct inode * inode, loff_t offset);
+-extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
+-
+-#ifdef CONFIG_MMU
+-extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+- unsigned long address, int write_access);
+-#else
+-static inline int handle_mm_fault(struct mm_struct *mm,
+- struct vm_area_struct *vma, unsigned long address,
+- int write_access)
+-{
+- /* should never happen if there's no MMU */
+- BUG();
+- return VM_FAULT_SIGBUS;
+-}
+-#endif
+-
+-extern int make_pages_present(unsigned long addr, unsigned long end);
+-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+-
+-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
+- int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
+-
+-extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
+-extern void do_invalidatepage(struct page *page, unsigned long offset);
+-
+-int __set_page_dirty_nobuffers(struct page *page);
+-int __set_page_dirty_no_writeback(struct page *page);
+-int redirty_page_for_writepage(struct writeback_control *wbc,
+- struct page *page);
+-int set_page_dirty(struct page *page);
+-int set_page_dirty_lock(struct page *page);
+-int clear_page_dirty_for_io(struct page *page);
+-
+-extern unsigned long move_page_tables(struct vm_area_struct *vma,
+- unsigned long old_addr, struct vm_area_struct *new_vma,
+- unsigned long new_addr, unsigned long len);
+-extern unsigned long do_mremap(unsigned long addr,
+- unsigned long old_len, unsigned long new_len,
+- unsigned long flags, unsigned long new_addr);
+-extern int mprotect_fixup(struct vm_area_struct *vma,
+- struct vm_area_struct **pprev, unsigned long start,
+- unsigned long end, unsigned long newflags);
+-
+-/*
+- * get_user_pages_fast provides equivalent functionality to get_user_pages,
+- * operating on current and current->mm (force=0 and doesn't return any vmas).
+- *
+- * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
+- * can be made about locking. get_user_pages_fast is to be implemented in a
+- * way that is advantageous (vs get_user_pages()) when the user memory area is
+- * already faulted in and present in ptes. However if the pages have to be
+- * faulted in, it may turn out to be slightly slower).
+- */
+-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+- struct page **pages);
+-
+-/*
+- * A callback you can register to apply pressure to ageable caches.
+- *
+- * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
+- * look through the least-recently-used 'nr_to_scan' entries and
+- * attempt to free them up. It should return the number of objects
+- * which remain in the cache. If it returns -1, it means it cannot do
+- * any scanning at this time (eg. there is a risk of deadlock).
+- *
+- * The 'gfpmask' refers to the allocation we are currently trying to
+- * fulfil.
+- *
+- * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
+- * querying the cache size, so a fastpath for that case is appropriate.
+- */
+-struct shrinker {
+- int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+- int seeks; /* seeks to recreate an obj */
+-
+- /* These are for internal use */
+- struct list_head list;
+- long nr; /* objs pending delete */
+-};
+-#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
+-extern void register_shrinker(struct shrinker *);
+-extern void unregister_shrinker(struct shrinker *);
+-
+-int vma_wants_writenotify(struct vm_area_struct *vma);
+-
+-extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
+-
+-#ifdef __PAGETABLE_PUD_FOLDED
+-static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+- unsigned long address)
+-{
+- return 0;
+-}
+-#else
+-int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+-#endif
+-
+-#ifdef __PAGETABLE_PMD_FOLDED
+-static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+- unsigned long address)
+-{
+- return 0;
+-}
+-#else
+-int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
+-#endif
+-
+-int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
+-int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
+-
+-/*
+- * The following ifdef needed to get the 4level-fixup.h header to work.
+- * Remove it when 4level-fixup.h has been removed.
+- */
+-#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
+-static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+-{
+- return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
+- NULL: pud_offset(pgd, address);
+-}
+-
+-static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+-{
+- return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
+- NULL: pmd_offset(pud, address);
+-}
+-#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
+-
+-#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+-/*
+- * We tuck a spinlock to guard each pagetable page into its struct page,
+- * at page->private, with BUILD_BUG_ON to make sure that this will not
+- * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
+- * When freeing, reset page->mapping so free_pages_check won't complain.
+- */
+-#define __pte_lockptr(page) &((page)->ptl)
+-#define pte_lock_init(_page) do { \
+- spin_lock_init(__pte_lockptr(_page)); \
+-} while (0)
+-#define pte_lock_deinit(page) ((page)->mapping = NULL)
+-#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
+-#else
+-/*
+- * We use mm->page_table_lock to guard all pagetable pages of the mm.
+- */
+-#define pte_lock_init(page) do {} while (0)
+-#define pte_lock_deinit(page) do {} while (0)
+-#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
+-#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+-
+-static inline void pgtable_page_ctor(struct page *page)
+-{
+- pte_lock_init(page);
+- inc_zone_page_state(page, NR_PAGETABLE);
+-}
+-
+-static inline void pgtable_page_dtor(struct page *page)
+-{
+- pte_lock_deinit(page);
+- dec_zone_page_state(page, NR_PAGETABLE);
+-}
+-
+-#define pte_offset_map_lock(mm, pmd, address, ptlp) \
+-({ \
+- spinlock_t *__ptl = pte_lockptr(mm, pmd); \
+- pte_t *__pte = pte_offset_map(pmd, address); \
+- *(ptlp) = __ptl; \
+- spin_lock(__ptl); \
+- __pte; \
+-})
+-
+-#define pte_unmap_unlock(pte, ptl) do { \
+- spin_unlock(ptl); \
+- pte_unmap(pte); \
+-} while (0)
+-
+-#define pte_alloc_map(mm, pmd, address) \
+- ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
+- NULL: pte_offset_map(pmd, address))
+-
+-#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
+- ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
+- NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
+-
+-#define pte_alloc_kernel(pmd, address) \
+- ((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
+- NULL: pte_offset_kernel(pmd, address))
+-
+-extern void free_area_init(unsigned long * zones_size);
+-extern void free_area_init_node(int nid, unsigned long * zones_size,
+- unsigned long zone_start_pfn, unsigned long *zholes_size);
+-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+-/*
+- * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+- * zones, allocate the backing mem_map and account for memory holes in a more
+- * architecture independent manner. This is a substitute for creating the
+- * zone_sizes[] and zholes_size[] arrays and passing them to
+- * free_area_init_node()
+- *
+- * An architecture is expected to register range of page frames backed by
+- * physical memory with add_active_range() before calling
+- * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
+- * usage, an architecture is expected to do something like
+- *
+- * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
+- * max_highmem_pfn};
+- * for_each_valid_physical_page_range()
+- * add_active_range(node_id, start_pfn, end_pfn)
+- * free_area_init_nodes(max_zone_pfns);
+- *
+- * If the architecture guarantees that there are no holes in the ranges
+- * registered with add_active_range(), free_bootmem_active_regions()
+- * will call free_bootmem_node() for each registered physical page range.
+- * Similarly sparse_memory_present_with_active_regions() calls
+- * memory_present() for each range when SPARSEMEM is enabled.
+- *
+- * See mm/page_alloc.c for more information on each function exposed by
+- * CONFIG_ARCH_POPULATES_NODE_MAP
+- */
+-extern void free_area_init_nodes(unsigned long *max_zone_pfn);
+-extern void add_active_range(unsigned int nid, unsigned long start_pfn,
+- unsigned long end_pfn);
+-extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
+- unsigned long end_pfn);
+-extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
+- unsigned long end_pfn);
+-extern void remove_all_active_ranges(void);
+-extern unsigned long absent_pages_in_range(unsigned long start_pfn,
+- unsigned long end_pfn);
+-extern void get_pfn_range_for_nid(unsigned int nid,
+- unsigned long *start_pfn, unsigned long *end_pfn);
+-extern unsigned long find_min_pfn_with_active_regions(void);
+-extern void free_bootmem_with_active_regions(int nid,
+- unsigned long max_low_pfn);
+-typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
+-extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
+-extern void sparse_memory_present_with_active_regions(int nid);
+-#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+-extern int early_pfn_to_nid(unsigned long pfn);
+-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+-extern void set_dma_reserve(unsigned long new_dma_reserve);
+-extern void memmap_init_zone(unsigned long, int, unsigned long,
+- unsigned long, enum memmap_context);
+-extern void setup_per_zone_pages_min(void);
+-extern void mem_init(void);
+-extern void show_mem(void);
+-extern void si_meminfo(struct sysinfo * val);
+-extern void si_meminfo_node(struct sysinfo *val, int nid);
+-extern int after_bootmem;
+-
+-#ifdef CONFIG_NUMA
+-extern void setup_per_cpu_pageset(void);
+-#else
+-static inline void setup_per_cpu_pageset(void) {}
+-#endif
+-
+-/* prio_tree.c */
+-void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
+-void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
+-void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
+-struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
+- struct prio_tree_iter *iter);
+-
+-#define vma_prio_tree_foreach(vma, iter, root, begin, end) \
+- for (prio_tree_iter_init(iter, root, begin, end), vma = NULL; \
+- (vma = vma_prio_tree_next(vma, iter)); )
+-
+-static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
+- struct list_head *list)
+-{
+- vma->shared.vm_set.parent = NULL;
+- list_add_tail(&vma->shared.vm_set.list, list);
+-}
+-
+-/* mmap.c */
+-extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
+-extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
+- unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
+-extern struct vm_area_struct *vma_merge(struct mm_struct *,
+- struct vm_area_struct *prev, unsigned long addr, unsigned long end,
+- unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
+- struct mempolicy *);
+-extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
+-extern int split_vma(struct mm_struct *,
+- struct vm_area_struct *, unsigned long addr, int new_below);
+-extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+-extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
+- struct rb_node **, struct rb_node *);
+-extern void unlink_file_vma(struct vm_area_struct *);
+-extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
+- unsigned long addr, unsigned long len, pgoff_t pgoff);
+-extern void exit_mmap(struct mm_struct *);
+-
+-extern int mm_take_all_locks(struct mm_struct *mm);
+-extern void mm_drop_all_locks(struct mm_struct *mm);
+-
+-#ifdef CONFIG_PROC_FS
+-/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
+-extern void added_exe_file_vma(struct mm_struct *mm);
+-extern void removed_exe_file_vma(struct mm_struct *mm);
+-#else
+-static inline void added_exe_file_vma(struct mm_struct *mm)
+-{}
+-
+-static inline void removed_exe_file_vma(struct mm_struct *mm)
+-{}
+-#endif /* CONFIG_PROC_FS */
+-
+-extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
+-extern int install_special_mapping(struct mm_struct *mm,
+- unsigned long addr, unsigned long len,
+- unsigned long flags, struct page **pages);
+-
+-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+-
+-extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+- unsigned long len, unsigned long prot,
+- unsigned long flag, unsigned long pgoff);
+-extern unsigned long mmap_region(struct file *file, unsigned long addr,
+- unsigned long len, unsigned long flags,
+- unsigned int vm_flags, unsigned long pgoff,
+- int accountable);
+-
+-static inline unsigned long do_mmap(struct file *file, unsigned long addr,
+- unsigned long len, unsigned long prot,
+- unsigned long flag, unsigned long offset)
+-{
+- unsigned long ret = -EINVAL;
+- if ((offset + PAGE_ALIGN(len)) < offset)
+- goto out;
+- if (!(offset & ~PAGE_MASK))
+- ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+-out:
+- return ret;
+-}
+-
+-extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+-
+-extern unsigned long do_brk(unsigned long, unsigned long);
+-
+-/* filemap.c */
+-extern unsigned long page_unuse(struct page *);
+-extern void truncate_inode_pages(struct address_space *, loff_t);
+-extern void truncate_inode_pages_range(struct address_space *,
+- loff_t lstart, loff_t lend);
+-
+-/* generic vm_area_ops exported for stackable file systems */
+-extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
+-
+-/* mm/page-writeback.c */
+-int write_one_page(struct page *page, int wait);
+-
+-/* readahead.c */
+-#define VM_MAX_READAHEAD 128 /* kbytes */
+-#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
+-
+-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
+- pgoff_t offset, unsigned long nr_to_read);
+-int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
+- pgoff_t offset, unsigned long nr_to_read);
+-
+-void page_cache_sync_readahead(struct address_space *mapping,
+- struct file_ra_state *ra,
+- struct file *filp,
+- pgoff_t offset,
+- unsigned long size);
+-
+-void page_cache_async_readahead(struct address_space *mapping,
+- struct file_ra_state *ra,
+- struct file *filp,
+- struct page *pg,
+- pgoff_t offset,
+- unsigned long size);
+-
+-unsigned long max_sane_readahead(unsigned long nr);
+-
+-/* Do stack extension */
+-extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+-#ifdef CONFIG_IA64
+-extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+-#endif
+-extern int expand_stack_downwards(struct vm_area_struct *vma,
+- unsigned long address);
+-
+-/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+-extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
+-extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+- struct vm_area_struct **pprev);
+-
+-/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+- NULL if none. Assume start_addr < end_addr. */
+-static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+-{
+- struct vm_area_struct * vma = find_vma(mm,start_addr);
+-
+- if (vma && end_addr <= vma->vm_start)
+- vma = NULL;
+- return vma;
+-}
+-
+-static inline unsigned long vma_pages(struct vm_area_struct *vma)
+-{
+- return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+-}
+-
+-pgprot_t vm_get_page_prot(unsigned long vm_flags);
+-struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+-int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+- unsigned long pfn, unsigned long size, pgprot_t);
+-int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+- unsigned long pfn);
+-int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+- unsigned long pfn);
+-
+-struct page *follow_page(struct vm_area_struct *, unsigned long address,
+- unsigned int foll_flags);
+-#define FOLL_WRITE 0x01 /* check pte is writable */
+-#define FOLL_TOUCH 0x02 /* mark page accessed */
+-#define FOLL_GET 0x04 /* do get_page on page */
+-#define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */
+-
+-typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
+- void *data);
+-extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
+- unsigned long size, pte_fn_t fn, void *data);
+-
+-#ifdef CONFIG_PROC_FS
+-void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+-#else
+-static inline void vm_stat_account(struct mm_struct *mm,
+- unsigned long flags, struct file *file, long pages)
+-{
+-}
+-#endif /* CONFIG_PROC_FS */
+-
+-#ifdef CONFIG_DEBUG_PAGEALLOC
+-extern int debug_pagealloc_enabled;
+-
+-extern void kernel_map_pages(struct page *page, int numpages, int enable);
+-
+-static inline void enable_debug_pagealloc(void)
+-{
+- debug_pagealloc_enabled = 1;
+-}
+-#ifdef CONFIG_HIBERNATION
+-extern bool kernel_page_present(struct page *page);
+-#endif /* CONFIG_HIBERNATION */
+-#else
+-static inline void
+-kernel_map_pages(struct page *page, int numpages, int enable) {}
+-static inline void enable_debug_pagealloc(void)
+-{
+-}
+-#ifdef CONFIG_HIBERNATION
+-static inline bool kernel_page_present(struct page *page) { return true; }
+-#endif /* CONFIG_HIBERNATION */
+-#endif
+-
+-extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
+-#ifdef __HAVE_ARCH_GATE_AREA
+-int in_gate_area_no_task(unsigned long addr);
+-int in_gate_area(struct task_struct *task, unsigned long addr);
+-#else
+-int in_gate_area_no_task(unsigned long addr);
+-#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
+-#endif /* __HAVE_ARCH_GATE_AREA */
+-
+-int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
+- void __user *, size_t *, loff_t *);
+-unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+- unsigned long lru_pages);
+-
+-#ifndef CONFIG_MMU
+-#define randomize_va_space 0
+-#else
+-extern int randomize_va_space;
+-#endif
+-
+-const char * arch_vma_name(struct vm_area_struct *vma);
+-void print_vma_addr(char *prefix, unsigned long rip);
+-
+-struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
+-pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
+-pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
+-pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
+-pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
+-void *vmemmap_alloc_block(unsigned long size, int node);
+-void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
+-int vmemmap_populate_basepages(struct page *start_page,
+- unsigned long pages, int node);
+-int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
+-void vmemmap_populate_print_last(void);
+-
+-#endif /* __KERNEL__ */
+-#endif /* _LINUX_MM_H */
+diff -Nurb linux-2.6.27-720/include/linux/netdevice.h linux-2.6.27-710/include/linux/netdevice.h
+--- linux-2.6.27-720/include/linux/netdevice.h 2009-05-04 12:18:34.000000000 -0400
++++ linux-2.6.27-710/include/linux/netdevice.h 2009-05-04 12:16:04.000000000 -0400
+@@ -735,46 +735,6 @@
+ /* GARP */
+ struct garp_port *garp_port;
+
+- /* Click polling support */
+- /*
+- * polling is < 0 if the device does not support polling, == 0 if the
+- * device supports polling but interrupts are on, and > 0 if polling
+- * is on.
+- */
+- int polling;
+- int (*poll_on)(struct net_device *);
+- int (*poll_off)(struct net_device *);
+- /*
+- * rx_poll returns to caller a linked list of sk_buff objects received
+- * by the device. on call, the want argument specifies the number of
+- * packets wanted. on return, the want argument specifies the number
+- * of packets actually returned.
+- */
+- struct sk_buff * (*rx_poll)(struct net_device*, int *want);
+- /* refill rx dma ring using the given sk_buff list. returns 0 if
+- * successful, or if there are more entries need to be cleaned,
+- * returns the number of dirty entries. the ptr to the sk_buff list is
+- * updated by the driver to point to any unused skbs.
+- */
+- int (*rx_refill)(struct net_device*, struct sk_buff**);
+- /*
+- * place sk_buff on the transmit ring. returns 0 if successful, 1
+- * otherwise
+- */
+- int (*tx_queue)(struct net_device *, struct sk_buff *);
+- /*
+- * clean tx dma ring. returns the list of skb objects cleaned
+- */
+- struct sk_buff* (*tx_clean)(struct net_device *);
+- /*
+- * start transmission. returns 0 if successful, 1 otherwise
+- */
+- int (*tx_start)(struct net_device *);
+- /*
+- * tell device the end of a batch of packets
+- */
+- int (*tx_eob)(struct net_device *);
+-
+ /* class/net/name entry */
+ struct device dev;
+ /* space for optional statistics and wireless sysfs groups */
+@@ -959,11 +919,6 @@
+ extern int dev_queue_xmit(struct sk_buff *skb);
+ extern int register_netdevice(struct net_device *dev);
+ extern void unregister_netdevice(struct net_device *dev);
+-
+-extern int register_net_in(struct notifier_block *nb); /* Click */
+-extern int unregister_net_in(struct notifier_block *nb); /* Click */
+-extern int ptype_dispatch(struct sk_buff *skb, unsigned short type); /* Click */
+-
+ extern void free_netdev(struct net_device *dev);
+ extern void synchronize_net(void);
+ extern int register_netdevice_notifier(struct notifier_block *nb);
+@@ -1262,10 +1217,7 @@
+ extern int netif_rx(struct sk_buff *skb);
+ extern int netif_rx_ni(struct sk_buff *skb);
+ #define HAVE_NETIF_RECEIVE_SKB 1
+-//extern int netif_receive_skb(struct sk_buff *skb);
+-#define HAVE___NETIF_RECEIVE_SKB 1
+-extern int __netif_receive_skb(struct sk_buff *skb, unsigned short protocol, int ignore_notifiers);
+-
++extern int netif_receive_skb(struct sk_buff *skb);
+ extern void netif_nit_deliver(struct sk_buff *skb);
+ extern int dev_valid_name(const char *name);
+ extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
+@@ -1406,11 +1358,6 @@
+
+ extern void netif_device_attach(struct net_device *dev);
+
+-static inline int netif_receive_skb(struct sk_buff *skb)
+-{
+- return __netif_receive_skb(skb, skb->protocol, 0);
+-}
+-
+ /*
+ * Network interface message level settings
+ */
+diff -Nurb linux-2.6.27-720/include/linux/netdevice.h.orig linux-2.6.27-710/include/linux/netdevice.h.orig
+--- linux-2.6.27-720/include/linux/netdevice.h.orig 2009-05-04 12:16:04.000000000 -0400
++++ linux-2.6.27-710/include/linux/netdevice.h.orig 1969-12-31 19:00:00.000000000 -0500
+@@ -1,1732 +0,0 @@
+-/*
+- * INET An implementation of the TCP/IP protocol suite for the LINUX
+- * operating system. INET is implemented using the BSD Socket
+- * interface as the means of communication with the user level.
+- *
+- * Definitions for the Interfaces handler.
+- *
+- * Version: @(#)dev.h 1.0.10 08/12/93
+- *
+- * Authors: Ross Biro
+- * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+- * Corey Minyard <wf-rch!minyard@relay.EU.net>
+- * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
+- * Alan Cox, <Alan.Cox@linux.org>
+- * Bjorn Ekwall. <bj0rn@blox.se>
+- * Pekka Riikonen <priikone@poseidon.pspt.fi>
+- *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License
+- * as published by the Free Software Foundation; either version
+- * 2 of the License, or (at your option) any later version.
+- *
+- * Moved to /usr/include/linux for NET3
+- */
+-#ifndef _LINUX_NETDEVICE_H
+-#define _LINUX_NETDEVICE_H
+-
+-#include <linux/if.h>
+-#include <linux/if_ether.h>
+-#include <linux/if_packet.h>
+-
+-#ifdef __KERNEL__
+-#include <linux/timer.h>
+-#include <linux/delay.h>
+-#include <asm/atomic.h>
+-#include <asm/cache.h>
+-#include <asm/byteorder.h>
+-
+-#include <linux/device.h>
+-#include <linux/percpu.h>
+-#include <linux/dmaengine.h>
+-#include <linux/workqueue.h>
+-
+-#include <net/net_namespace.h>
+-
+-struct vlan_group;
+-struct ethtool_ops;
+-struct netpoll_info;
+-/* 802.11 specific */
+-struct wireless_dev;
+- /* source back-compat hooks */
+-#define SET_ETHTOOL_OPS(netdev,ops) \
+- ( (netdev)->ethtool_ops = (ops) )
+-
+-#define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev
+- functions are available. */
+-#define HAVE_FREE_NETDEV /* free_netdev() */
+-#define HAVE_NETDEV_PRIV /* netdev_priv() */
+-
+-#define NET_XMIT_SUCCESS 0
+-#define NET_XMIT_DROP 1 /* skb dropped */
+-#define NET_XMIT_CN 2 /* congestion notification */
+-#define NET_XMIT_POLICED 3 /* skb is shot by police */
+-#define NET_XMIT_MASK 0xFFFF /* qdisc flags in net/sch_generic.h */
+-
+-/* Backlog congestion levels */
+-#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
+-#define NET_RX_DROP 1 /* packet dropped */
+-#define NET_RX_CN_LOW 2 /* storm alert, just in case */
+-#define NET_RX_CN_MOD 3 /* Storm on its way! */
+-#define NET_RX_CN_HIGH 4 /* The storm is here */
+-#define NET_RX_BAD 5 /* packet dropped due to kernel error */
+-
+-/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
+- * indicates that the device will soon be dropping packets, or already drops
+- * some packets of the same priority; prompting us to send less aggressively. */
+-#define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e))
+-#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
+-
+-#endif
+-
+-#define MAX_ADDR_LEN 32 /* Largest hardware address length */
+-
+-/* Driver transmit return codes */
+-#define NETDEV_TX_OK 0 /* driver took care of packet */
+-#define NETDEV_TX_BUSY 1 /* driver tx path was busy*/
+-#define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */
+-
+-#ifdef __KERNEL__
+-
+-/*
+- * Compute the worst case header length according to the protocols
+- * used.
+- */
+-
+-#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
+-# if defined(CONFIG_MAC80211_MESH)
+-# define LL_MAX_HEADER 128
+-# else
+-# define LL_MAX_HEADER 96
+-# endif
+-#elif defined(CONFIG_TR)
+-# define LL_MAX_HEADER 48
+-#else
+-# define LL_MAX_HEADER 32
+-#endif
+-
+-#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
+- !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
+- !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
+- !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
+-#define MAX_HEADER LL_MAX_HEADER
+-#else
+-#define MAX_HEADER (LL_MAX_HEADER + 48)
+-#endif
+-
+-#endif /* __KERNEL__ */
+-
+-/*
+- * Network device statistics. Akin to the 2.0 ether stats but
+- * with byte counters.
+- */
+-
+-struct net_device_stats
+-{
+- unsigned long rx_packets; /* total packets received */
+- unsigned long tx_packets; /* total packets transmitted */
+- unsigned long rx_bytes; /* total bytes received */
+- unsigned long tx_bytes; /* total bytes transmitted */
+- unsigned long rx_errors; /* bad packets received */
+- unsigned long tx_errors; /* packet transmit problems */
+- unsigned long rx_dropped; /* no space in linux buffers */
+- unsigned long tx_dropped; /* no space available in linux */
+- unsigned long multicast; /* multicast packets received */
+- unsigned long collisions;
+-
+- /* detailed rx_errors: */
+- unsigned long rx_length_errors;
+- unsigned long rx_over_errors; /* receiver ring buff overflow */
+- unsigned long rx_crc_errors; /* recved pkt with crc error */
+- unsigned long rx_frame_errors; /* recv'd frame alignment error */
+- unsigned long rx_fifo_errors; /* recv'r fifo overrun */
+- unsigned long rx_missed_errors; /* receiver missed packet */
+-
+- /* detailed tx_errors */
+- unsigned long tx_aborted_errors;
+- unsigned long tx_carrier_errors;
+- unsigned long tx_fifo_errors;
+- unsigned long tx_heartbeat_errors;
+- unsigned long tx_window_errors;
+-
+- /* for cslip etc */
+- unsigned long rx_compressed;
+- unsigned long tx_compressed;
+-};
+-
+-
+-/* Media selection options. */
+-enum {
+- IF_PORT_UNKNOWN = 0,
+- IF_PORT_10BASE2,
+- IF_PORT_10BASET,
+- IF_PORT_AUI,
+- IF_PORT_100BASET,
+- IF_PORT_100BASETX,
+- IF_PORT_100BASEFX
+-};
+-
+-#ifdef __KERNEL__
+-
+-#include <linux/cache.h>
+-#include <linux/skbuff.h>
+-
+-struct neighbour;
+-struct neigh_parms;
+-struct sk_buff;
+-
+-struct netif_rx_stats
+-{
+- unsigned total;
+- unsigned dropped;
+- unsigned time_squeeze;
+- unsigned cpu_collision;
+-};
+-
+-DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
+-
+-struct dev_addr_list
+-{
+- struct dev_addr_list *next;
+- u8 da_addr[MAX_ADDR_LEN];
+- u8 da_addrlen;
+- u8 da_synced;
+- int da_users;
+- int da_gusers;
+-};
+-
+-/*
+- * We tag multicasts with these structures.
+- */
+-
+-#define dev_mc_list dev_addr_list
+-#define dmi_addr da_addr
+-#define dmi_addrlen da_addrlen
+-#define dmi_users da_users
+-#define dmi_gusers da_gusers
+-
+-struct hh_cache
+-{
+- struct hh_cache *hh_next; /* Next entry */
+- atomic_t hh_refcnt; /* number of users */
+-/*
+- * We want hh_output, hh_len, hh_lock and hh_data be a in a separate
+- * cache line on SMP.
+- * They are mostly read, but hh_refcnt may be changed quite frequently,
+- * incurring cache line ping pongs.
+- */
+- __be16 hh_type ____cacheline_aligned_in_smp;
+- /* protocol identifier, f.e ETH_P_IP
+- * NOTE: For VLANs, this will be the
+- * encapuslated type. --BLG
+- */
+- u16 hh_len; /* length of header */
+- int (*hh_output)(struct sk_buff *skb);
+- seqlock_t hh_lock;
+-
+- /* cached hardware header; allow for machine alignment needs. */
+-#define HH_DATA_MOD 16
+-#define HH_DATA_OFF(__len) \
+- (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
+-#define HH_DATA_ALIGN(__len) \
+- (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
+- unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
+-};
+-
+-/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
+- * Alternative is:
+- * dev->hard_header_len ? (dev->hard_header_len +
+- * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
+- *
+- * We could use other alignment values, but we must maintain the
+- * relationship HH alignment <= LL alignment.
+- *
+- * LL_ALLOCATED_SPACE also takes into account the tailroom the device
+- * may need.
+- */
+-#define LL_RESERVED_SPACE(dev) \
+- ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+-#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
+- ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+-#define LL_ALLOCATED_SPACE(dev) \
+- ((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
+-
+-struct header_ops {
+- int (*create) (struct sk_buff *skb, struct net_device *dev,
+- unsigned short type, const void *daddr,
+- const void *saddr, unsigned len);
+- int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
+- int (*rebuild)(struct sk_buff *skb);
+-#define HAVE_HEADER_CACHE
+- int (*cache)(const struct neighbour *neigh, struct hh_cache *hh);
+- void (*cache_update)(struct hh_cache *hh,
+- const struct net_device *dev,
+- const unsigned char *haddr);
+-};
+-
+-/* These flag bits are private to the generic network queueing
+- * layer, they may not be explicitly referenced by any other
+- * code.
+- */
+-
+-enum netdev_state_t
+-{
+- __LINK_STATE_START,
+- __LINK_STATE_PRESENT,
+- __LINK_STATE_NOCARRIER,
+- __LINK_STATE_LINKWATCH_PENDING,
+- __LINK_STATE_DORMANT,
+-};
+-
+-
+-/*
+- * This structure holds at boot time configured netdevice settings. They
+- * are then used in the device probing.
+- */
+-struct netdev_boot_setup {
+- char name[IFNAMSIZ];
+- struct ifmap map;
+-};
+-#define NETDEV_BOOT_SETUP_MAX 8
+-
+-extern int __init netdev_boot_setup(char *str);
+-
+-/*
+- * Structure for NAPI scheduling similar to tasklet but with weighting
+- */
+-struct napi_struct {
+- /* The poll_list must only be managed by the entity which
+- * changes the state of the NAPI_STATE_SCHED bit. This means
+- * whoever atomically sets that bit can add this napi_struct
+- * to the per-cpu poll_list, and whoever clears that bit
+- * can remove from the list right before clearing the bit.
+- */
+- struct list_head poll_list;
+-
+- unsigned long state;
+- int weight;
+- int (*poll)(struct napi_struct *, int);
+-#ifdef CONFIG_NETPOLL
+- spinlock_t poll_lock;
+- int poll_owner;
+- struct net_device *dev;
+- struct list_head dev_list;
+-#endif
+-};
+-
+-enum
+-{
+- NAPI_STATE_SCHED, /* Poll is scheduled */
+- NAPI_STATE_DISABLE, /* Disable pending */
+-};
+-
+-extern void __napi_schedule(struct napi_struct *n);
+-
+-static inline int napi_disable_pending(struct napi_struct *n)
+-{
+- return test_bit(NAPI_STATE_DISABLE, &n->state);
+-}
+-
+-/**
+- * napi_schedule_prep - check if napi can be scheduled
+- * @n: napi context
+- *
+- * Test if NAPI routine is already running, and if not mark
+- * it as running. This is used as a condition variable
+- * insure only one NAPI poll instance runs. We also make
+- * sure there is no pending NAPI disable.
+- */
+-static inline int napi_schedule_prep(struct napi_struct *n)
+-{
+- return !napi_disable_pending(n) &&
+- !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
+-}
+-
+-/**
+- * napi_schedule - schedule NAPI poll
+- * @n: napi context
+- *
+- * Schedule NAPI poll routine to be called if it is not already
+- * running.
+- */
+-static inline void napi_schedule(struct napi_struct *n)
+-{
+- if (napi_schedule_prep(n))
+- __napi_schedule(n);
+-}
+-
+-/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
+-static inline int napi_reschedule(struct napi_struct *napi)
+-{
+- if (napi_schedule_prep(napi)) {
+- __napi_schedule(napi);
+- return 1;
+- }
+- return 0;
+-}
+-
+-/**
+- * napi_complete - NAPI processing complete
+- * @n: napi context
+- *
+- * Mark NAPI processing as complete.
+- */
+-static inline void __napi_complete(struct napi_struct *n)
+-{
+- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
+- list_del(&n->poll_list);
+- smp_mb__before_clear_bit();
+- clear_bit(NAPI_STATE_SCHED, &n->state);
+-}
+-
+-static inline void napi_complete(struct napi_struct *n)
+-{
+- unsigned long flags;
+-
+- local_irq_save(flags);
+- __napi_complete(n);
+- local_irq_restore(flags);
+-}
+-
+-/**
+- * napi_disable - prevent NAPI from scheduling
+- * @n: napi context
+- *
+- * Stop NAPI from being scheduled on this context.
+- * Waits till any outstanding processing completes.
+- */
+-static inline void napi_disable(struct napi_struct *n)
+-{
+- set_bit(NAPI_STATE_DISABLE, &n->state);
+- while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
+- msleep(1);
+- clear_bit(NAPI_STATE_DISABLE, &n->state);
+-}
+-
+-/**
+- * napi_enable - enable NAPI scheduling
+- * @n: napi context
+- *
+- * Resume NAPI from being scheduled on this context.
+- * Must be paired with napi_disable.
+- */
+-static inline void napi_enable(struct napi_struct *n)
+-{
+- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
+- smp_mb__before_clear_bit();
+- clear_bit(NAPI_STATE_SCHED, &n->state);
+-}
+-
+-#ifdef CONFIG_SMP
+-/**
+- * napi_synchronize - wait until NAPI is not running
+- * @n: napi context
+- *
+- * Wait until NAPI is done being scheduled on this context.
+- * Waits till any outstanding processing completes but
+- * does not disable future activations.
+- */
+-static inline void napi_synchronize(const struct napi_struct *n)
+-{
+- while (test_bit(NAPI_STATE_SCHED, &n->state))
+- msleep(1);
+-}
+-#else
+-# define napi_synchronize(n) barrier()
+-#endif
+-
+-enum netdev_queue_state_t
+-{
+- __QUEUE_STATE_XOFF,
+- __QUEUE_STATE_FROZEN,
+-};
+-
+-struct netdev_queue {
+- struct net_device *dev;
+- struct Qdisc *qdisc;
+- unsigned long state;
+- spinlock_t _xmit_lock;
+- int xmit_lock_owner;
+- struct Qdisc *qdisc_sleeping;
+-} ____cacheline_aligned_in_smp;
+-
+-/*
+- * The DEVICE structure.
+- * Actually, this whole structure is a big mistake. It mixes I/O
+- * data with strictly "high-level" data, and it has to know about
+- * almost every data structure used in the INET module.
+- *
+- * FIXME: cleanup struct net_device such that network protocol info
+- * moves out.
+- */
+-
+-struct net_device
+-{
+-
+- /*
+- * This is the first field of the "visible" part of this structure
+- * (i.e. as seen by users in the "Space.c" file). It is the name
+- * the interface.
+- */
+- char name[IFNAMSIZ];
+- /* device name hash chain */
+- struct hlist_node name_hlist;
+-
+- /*
+- * I/O specific fields
+- * FIXME: Merge these and struct ifmap into one
+- */
+- unsigned long mem_end; /* shared mem end */
+- unsigned long mem_start; /* shared mem start */
+- unsigned long base_addr; /* device I/O address */
+- unsigned int irq; /* device IRQ number */
+-
+- /*
+- * Some hardware also needs these fields, but they are not
+- * part of the usual set specified in Space.c.
+- */
+-
+- unsigned char if_port; /* Selectable AUI, TP,..*/
+- unsigned char dma; /* DMA channel */
+-
+- unsigned long state;
+-
+- struct list_head dev_list;
+-#ifdef CONFIG_NETPOLL
+- struct list_head napi_list;
+-#endif
+-
+- /* The device initialization function. Called only once. */
+- int (*init)(struct net_device *dev);
+-
+- /* ------- Fields preinitialized in Space.c finish here ------- */
+-
+- /* Net device features */
+- unsigned long features;
+-#define NETIF_F_SG 1 /* Scatter/gather IO. */
+-#define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */
+-#define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */
+-#define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */
+-#define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */
+-#define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
+-#define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
+-#define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
+-#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
+-#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
+-#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
+-#define NETIF_F_GSO 2048 /* Enable software GSO. */
+-#define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */
+- /* do not use LLTX in new drivers */
+-#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
+-#define NETIF_F_LRO 32768 /* large receive offload */
+-
+- /* Segmentation offload features */
+-#define NETIF_F_GSO_SHIFT 16
+-#define NETIF_F_GSO_MASK 0xffff0000
+-#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
+-#define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
+-#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
+-#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
+-#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
+-
+- /* List of features with software fallbacks. */
+-#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
+-
+-
+-#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
+-#define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
+-#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
+-#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
+-
+- /* Interface index. Unique device identifier */
+- int ifindex;
+- int iflink;
+-
+-
+- struct net_device_stats* (*get_stats)(struct net_device *dev);
+- struct net_device_stats stats;
+-
+-#ifdef CONFIG_WIRELESS_EXT
+- /* List of functions to handle Wireless Extensions (instead of ioctl).
+- * See <net/iw_handler.h> for details. Jean II */
+- const struct iw_handler_def * wireless_handlers;
+- /* Instance data managed by the core of Wireless Extensions. */
+- struct iw_public_data * wireless_data;
+-#endif
+- const struct ethtool_ops *ethtool_ops;
+-
+- /* Hardware header description */
+- const struct header_ops *header_ops;
+-
+- /*
+- * This marks the end of the "visible" part of the structure. All
+- * fields hereafter are internal to the system, and may change at
+- * will (read: may be cleaned up at will).
+- */
+-
+-
+- unsigned int flags; /* interface flags (a la BSD) */
+- unsigned short gflags;
+- unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */
+- unsigned short padded; /* How much padding added by alloc_netdev() */
+-
+- unsigned char operstate; /* RFC2863 operstate */
+- unsigned char link_mode; /* mapping policy to operstate */
+-
+- unsigned mtu; /* interface MTU value */
+- unsigned short type; /* interface hardware type */
+- unsigned short hard_header_len; /* hardware hdr length */
+-
+- /* extra head- and tailroom the hardware may need, but not in all cases
+- * can this be guaranteed, especially tailroom. Some cases also use
+- * LL_MAX_HEADER instead to allocate the skb.
+- */
+- unsigned short needed_headroom;
+- unsigned short needed_tailroom;
+-
+- struct net_device *master; /* Pointer to master device of a group,
+- * which this device is member of.
+- */
+-
+- /* Interface address info. */
+- unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
+- unsigned char addr_len; /* hardware address length */
+- unsigned short dev_id; /* for shared network cards */
+-
+- spinlock_t addr_list_lock;
+- struct dev_addr_list *uc_list; /* Secondary unicast mac addresses */
+- int uc_count; /* Number of installed ucasts */
+- int uc_promisc;
+- struct dev_addr_list *mc_list; /* Multicast mac addresses */
+- int mc_count; /* Number of installed mcasts */
+- unsigned int promiscuity;
+- unsigned int allmulti;
+-
+-
+- /* Protocol specific pointers */
+-
+- void *atalk_ptr; /* AppleTalk link */
+- void *ip_ptr; /* IPv4 specific data */
+- void *dn_ptr; /* DECnet specific data */
+- void *ip6_ptr; /* IPv6 specific data */
+- void *ec_ptr; /* Econet specific data */
+- void *ax25_ptr; /* AX.25 specific data */
+- struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
+- assign before registering */
+-
+-/*
+- * Cache line mostly used on receive path (including eth_type_trans())
+- */
+- unsigned long last_rx; /* Time of last Rx */
+- /* Interface address info used in eth_type_trans() */
+- unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
+- because most packets are unicast) */
+-
+- unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
+-
+- struct netdev_queue rx_queue;
+-
+- struct netdev_queue *_tx ____cacheline_aligned_in_smp;
+-
+- /* Number of TX queues allocated at alloc_netdev_mq() time */
+- unsigned int num_tx_queues;
+-
+- /* Number of TX queues currently active in device */
+- unsigned int real_num_tx_queues;
+-
+- unsigned long tx_queue_len; /* Max frames per queue allowed */
+- spinlock_t tx_global_lock;
+-/*
+- * One part is mostly used on xmit path (device)
+- */
+- void *priv; /* pointer to private data */
+- int (*hard_start_xmit) (struct sk_buff *skb,
+- struct net_device *dev);
+- /* These may be needed for future network-power-down code. */
+- unsigned long trans_start; /* Time (in jiffies) of last Tx */
+-
+- int watchdog_timeo; /* used by dev_watchdog() */
+- struct timer_list watchdog_timer;
+-
+-/*
+- * refcnt is a very hot point, so align it on SMP
+- */
+- /* Number of references to this device */
+- atomic_t refcnt ____cacheline_aligned_in_smp;
+-
+- /* delayed register/unregister */
+- struct list_head todo_list;
+- /* device index hash chain */
+- struct hlist_node index_hlist;
+-
+- struct net_device *link_watch_next;
+-
+- /* register/unregister state machine */
+- enum { NETREG_UNINITIALIZED=0,
+- NETREG_REGISTERED, /* completed register_netdevice */
+- NETREG_UNREGISTERING, /* called unregister_netdevice */
+- NETREG_UNREGISTERED, /* completed unregister todo */
+- NETREG_RELEASED, /* called free_netdev */
+- } reg_state;
+-
+- /* Called after device is detached from network. */
+- void (*uninit)(struct net_device *dev);
+- /* Called after last user reference disappears. */
+- void (*destructor)(struct net_device *dev);
+-
+- /* Pointers to interface service routines. */
+- int (*open)(struct net_device *dev);
+- int (*stop)(struct net_device *dev);
+-#define HAVE_NETDEV_POLL
+-#define HAVE_CHANGE_RX_FLAGS
+- void (*change_rx_flags)(struct net_device *dev,
+- int flags);
+-#define HAVE_SET_RX_MODE
+- void (*set_rx_mode)(struct net_device *dev);
+-#define HAVE_MULTICAST
+- void (*set_multicast_list)(struct net_device *dev);
+-#define HAVE_SET_MAC_ADDR
+- int (*set_mac_address)(struct net_device *dev,
+- void *addr);
+-#define HAVE_VALIDATE_ADDR
+- int (*validate_addr)(struct net_device *dev);
+-#define HAVE_PRIVATE_IOCTL
+- int (*do_ioctl)(struct net_device *dev,
+- struct ifreq *ifr, int cmd);
+-#define HAVE_SET_CONFIG
+- int (*set_config)(struct net_device *dev,
+- struct ifmap *map);
+-#define HAVE_CHANGE_MTU
+- int (*change_mtu)(struct net_device *dev, int new_mtu);
+-
+-#define HAVE_TX_TIMEOUT
+- void (*tx_timeout) (struct net_device *dev);
+-
+- void (*vlan_rx_register)(struct net_device *dev,
+- struct vlan_group *grp);
+- void (*vlan_rx_add_vid)(struct net_device *dev,
+- unsigned short vid);
+- void (*vlan_rx_kill_vid)(struct net_device *dev,
+- unsigned short vid);
+-
+- int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
+-#ifdef CONFIG_NETPOLL
+- struct netpoll_info *npinfo;
+-#endif
+-#ifdef CONFIG_NET_POLL_CONTROLLER
+- void (*poll_controller)(struct net_device *dev);
+-#endif
+-
+- u16 (*select_queue)(struct net_device *dev,
+- struct sk_buff *skb);
+-
+-#ifdef CONFIG_NET_NS
+- /* Network namespace this network device is inside */
+- struct net *nd_net;
+-#endif
+-
+- /* mid-layer private */
+- void *ml_priv;
+-
+- /* bridge stuff */
+- struct net_bridge_port *br_port;
+- /* macvlan */
+- struct macvlan_port *macvlan_port;
+- /* GARP */
+- struct garp_port *garp_port;
+-
+- /* class/net/name entry */
+- struct device dev;
+- /* space for optional statistics and wireless sysfs groups */
+- struct attribute_group *sysfs_groups[3];
+-
+- /* rtnetlink link ops */
+- const struct rtnl_link_ops *rtnl_link_ops;
+-
+- /* VLAN feature mask */
+- unsigned long vlan_features;
+-
+- /* for setting kernel sock attribute on TCP connection setup */
+-#define GSO_MAX_SIZE 65536
+- unsigned int gso_max_size;
+-};
+-#define to_net_dev(d) container_of(d, struct net_device, dev)
+-
+-#define NETDEV_ALIGN 32
+-#define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1)
+-
+-static inline
+-struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
+- unsigned int index)
+-{
+- return &dev->_tx[index];
+-}
+-
+-static inline void netdev_for_each_tx_queue(struct net_device *dev,
+- void (*f)(struct net_device *,
+- struct netdev_queue *,
+- void *),
+- void *arg)
+-{
+- unsigned int i;
+-
+- for (i = 0; i < dev->num_tx_queues; i++)
+- f(dev, &dev->_tx[i], arg);
+-}
+-
+-/*
+- * Net namespace inlines
+- */
+-static inline
+-struct net *dev_net(const struct net_device *dev)
+-{
+-#ifdef CONFIG_NET_NS
+- return dev->nd_net;
+-#else
+- return &init_net;
+-#endif
+-}
+-
+-static inline
+-void dev_net_set(struct net_device *dev, struct net *net)
+-{
+-#ifdef CONFIG_NET_NS
+- release_net(dev->nd_net);
+- dev->nd_net = hold_net(net);
+-#endif
+-}
+-
+-/**
+- * netdev_priv - access network device private data
+- * @dev: network device
+- *
+- * Get network device private data
+- */
+-static inline void *netdev_priv(const struct net_device *dev)
+-{
+- return (char *)dev + ((sizeof(struct net_device)
+- + NETDEV_ALIGN_CONST)
+- & ~NETDEV_ALIGN_CONST);
+-}
+-
+-/* Set the sysfs physical device reference for the network logical device
+- * if set prior to registration will cause a symlink during initialization.
+- */
+-#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
+-
+-/**
+- * netif_napi_add - initialize a napi context
+- * @dev: network device
+- * @napi: napi context
+- * @poll: polling function
+- * @weight: default weight
+- *
+- * netif_napi_add() must be used to initialize a napi context prior to calling
+- * *any* of the other napi related functions.
+- */
+-static inline void netif_napi_add(struct net_device *dev,
+- struct napi_struct *napi,
+- int (*poll)(struct napi_struct *, int),
+- int weight)
+-{
+- INIT_LIST_HEAD(&napi->poll_list);
+- napi->poll = poll;
+- napi->weight = weight;
+-#ifdef CONFIG_NETPOLL
+- napi->dev = dev;
+- list_add(&napi->dev_list, &dev->napi_list);
+- spin_lock_init(&napi->poll_lock);
+- napi->poll_owner = -1;
+-#endif
+- set_bit(NAPI_STATE_SCHED, &napi->state);
+-}
+-
+-/**
+- * netif_napi_del - remove a napi context
+- * @napi: napi context
+- *
+- * netif_napi_del() removes a napi context from the network device napi list
+- */
+-static inline void netif_napi_del(struct napi_struct *napi)
+-{
+-#ifdef CONFIG_NETPOLL
+- list_del(&napi->dev_list);
+-#endif
+-}
+-
+-struct packet_type {
+- __be16 type; /* This is really htons(ether_type). */
+- struct net_device *dev; /* NULL is wildcarded here */
+- unsigned char sknid_elevator;
+- int (*func) (struct sk_buff *,
+- struct net_device *,
+- struct packet_type *,
+- struct net_device *);
+- struct sk_buff *(*gso_segment)(struct sk_buff *skb,
+- int features);
+- int (*gso_send_check)(struct sk_buff *skb);
+- void *af_packet_priv;
+- struct list_head list;
+-};
+-
+-#include <linux/interrupt.h>
+-#include <linux/notifier.h>
+-
+-extern rwlock_t dev_base_lock; /* Device list lock */
+-
+-
+-#define for_each_netdev(net, d) \
+- list_for_each_entry(d, &(net)->dev_base_head, dev_list)
+-#define for_each_netdev_safe(net, d, n) \
+- list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
+-#define for_each_netdev_continue(net, d) \
+- list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
+-#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
+-
+-static inline struct net_device *next_net_device(struct net_device *dev)
+-{
+- struct list_head *lh;
+- struct net *net;
+-
+- net = dev_net(dev);
+- lh = dev->dev_list.next;
+- return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
+-}
+-
+-static inline struct net_device *first_net_device(struct net *net)
+-{
+- return list_empty(&net->dev_base_head) ? NULL :
+- net_device_entry(net->dev_base_head.next);
+-}
+-
+-extern int netdev_boot_setup_check(struct net_device *dev);
+-extern unsigned long netdev_boot_base(const char *prefix, int unit);
+-extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
+-extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
+-extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
+-extern void dev_add_pack(struct packet_type *pt);
+-extern void dev_remove_pack(struct packet_type *pt);
+-extern void __dev_remove_pack(struct packet_type *pt);
+-
+-extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
+- unsigned short mask);
+-extern struct net_device *dev_get_by_name(struct net *net, const char *name);
+-extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
+-extern int dev_alloc_name(struct net_device *dev, const char *name);
+-extern int dev_open(struct net_device *dev);
+-extern int dev_close(struct net_device *dev);
+-extern void dev_disable_lro(struct net_device *dev);
+-extern int dev_queue_xmit(struct sk_buff *skb);
+-extern int register_netdevice(struct net_device *dev);
+-extern void unregister_netdevice(struct net_device *dev);
+-extern void free_netdev(struct net_device *dev);
+-extern void synchronize_net(void);
+-extern int register_netdevice_notifier(struct notifier_block *nb);
+-extern int unregister_netdevice_notifier(struct notifier_block *nb);
+-extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
+-extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
+-extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+-extern int dev_restart(struct net_device *dev);
+-#ifdef CONFIG_NETPOLL_TRAP
+-extern int netpoll_trap(void);
+-#endif
+-
+-static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
+- unsigned short type,
+- const void *daddr, const void *saddr,
+- unsigned len)
+-{
+- if (!dev->header_ops || !dev->header_ops->create)
+- return 0;
+-
+- return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
+-}
+-
+-static inline int dev_parse_header(const struct sk_buff *skb,
+- unsigned char *haddr)
+-{
+- const struct net_device *dev = skb->dev;
+-
+- if (!dev->header_ops || !dev->header_ops->parse)
+- return 0;
+- return dev->header_ops->parse(skb, haddr);
+-}
+-
+-typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
+-extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
+-static inline int unregister_gifconf(unsigned int family)
+-{
+- return register_gifconf(family, NULL);
+-}
+-
+-/*
+- * Incoming packets are placed on per-cpu queues so that
+- * no locking is needed.
+- */
+-struct softnet_data
+-{
+- struct Qdisc *output_queue;
+- struct sk_buff_head input_pkt_queue;
+- struct list_head poll_list;
+- struct sk_buff *completion_queue;
+-
+- struct napi_struct backlog;
+-#ifdef CONFIG_NET_DMA
+- struct dma_chan *net_dma;
+-#endif
+-};
+-
+-DECLARE_PER_CPU(struct softnet_data,softnet_data);
+-
+-#define HAVE_NETIF_QUEUE
+-
+-extern void __netif_schedule(struct Qdisc *q);
+-
+-static inline void netif_schedule_queue(struct netdev_queue *txq)
+-{
+- if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+- __netif_schedule(txq->qdisc);
+-}
+-
+-static inline void netif_tx_schedule_all(struct net_device *dev)
+-{
+- unsigned int i;
+-
+- for (i = 0; i < dev->num_tx_queues; i++)
+- netif_schedule_queue(netdev_get_tx_queue(dev, i));
+-}
+-
+-static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
+-{
+- clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+-}
+-
+-/**
+- * netif_start_queue - allow transmit
+- * @dev: network device
+- *
+- * Allow upper layers to call the device hard_start_xmit routine.
+- */
+-static inline void netif_start_queue(struct net_device *dev)
+-{
+- netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
+-}
+-
+-static inline void netif_tx_start_all_queues(struct net_device *dev)
+-{
+- unsigned int i;
+-
+- for (i = 0; i < dev->num_tx_queues; i++) {
+- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+- netif_tx_start_queue(txq);
+- }
+-}
+-
+-static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
+-{
+-#ifdef CONFIG_NETPOLL_TRAP
+- if (netpoll_trap()) {
+- clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+- return;
+- }
+-#endif
+- if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+- __netif_schedule(dev_queue->qdisc);
+-}
+-
+-/**
+- * netif_wake_queue - restart transmit
+- * @dev: network device
+- *
+- * Allow upper layers to call the device hard_start_xmit routine.
+- * Used for flow control when transmit resources are available.
+- */
+-static inline void netif_wake_queue(struct net_device *dev)
+-{
+- netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
+-}
+-
+-static inline void netif_tx_wake_all_queues(struct net_device *dev)
+-{
+- unsigned int i;
+-
+- for (i = 0; i < dev->num_tx_queues; i++) {
+- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+- netif_tx_wake_queue(txq);
+- }
+-}
+-
+-static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+-{
+- set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+-}
+-
+-/**
+- * netif_stop_queue - stop transmitted packets
+- * @dev: network device
+- *
+- * Stop upper layers calling the device hard_start_xmit routine.
+- * Used for flow control when transmit resources are unavailable.
+- */
+-static inline void netif_stop_queue(struct net_device *dev)
+-{
+- netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
+-}
+-
+-static inline void netif_tx_stop_all_queues(struct net_device *dev)
+-{
+- unsigned int i;
+-
+- for (i = 0; i < dev->num_tx_queues; i++) {
+- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+- netif_tx_stop_queue(txq);
+- }
+-}
+-
+-static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
+-{
+- return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+-}
+-
+-/**
+- * netif_queue_stopped - test if transmit queue is flowblocked
+- * @dev: network device
+- *
+- * Test if transmit queue on device is currently unable to send.
+- */
+-static inline int netif_queue_stopped(const struct net_device *dev)
+-{
+- return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
+-}
+-
+-static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+-{
+- return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
+-}
+-
+-/**
+- * netif_running - test if up
+- * @dev: network device
+- *
+- * Test if the device has been brought up.
+- */
+-static inline int netif_running(const struct net_device *dev)
+-{
+- return test_bit(__LINK_STATE_START, &dev->state);
+-}
+-
+-/*
+- * Routines to manage the subqueues on a device. We only need start
+- * stop, and a check if it's stopped. All other device management is
+- * done at the overall netdevice level.
+- * Also test the device if we're multiqueue.
+- */
+-
+-/**
+- * netif_start_subqueue - allow sending packets on subqueue
+- * @dev: network device
+- * @queue_index: sub queue index
+- *
+- * Start individual transmit queue of a device with multiple transmit queues.
+- */
+-static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
+-{
+- struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+- clear_bit(__QUEUE_STATE_XOFF, &txq->state);
+-}
+-
+-/**
+- * netif_stop_subqueue - stop sending packets on subqueue
+- * @dev: network device
+- * @queue_index: sub queue index
+- *
+- * Stop individual transmit queue of a device with multiple transmit queues.
+- */
+-static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
+-{
+- struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+-#ifdef CONFIG_NETPOLL_TRAP
+- if (netpoll_trap())
+- return;
+-#endif
+- set_bit(__QUEUE_STATE_XOFF, &txq->state);
+-}
+-
+-/**
+- * netif_subqueue_stopped - test status of subqueue
+- * @dev: network device
+- * @queue_index: sub queue index
+- *
+- * Check individual transmit queue of a device with multiple transmit queues.
+- */
+-static inline int __netif_subqueue_stopped(const struct net_device *dev,
+- u16 queue_index)
+-{
+- struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+- return test_bit(__QUEUE_STATE_XOFF, &txq->state);
+-}
+-
+-static inline int netif_subqueue_stopped(const struct net_device *dev,
+- struct sk_buff *skb)
+-{
+- return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
+-}
+-
+-/**
+- * netif_wake_subqueue - allow sending packets on subqueue
+- * @dev: network device
+- * @queue_index: sub queue index
+- *
+- * Resume individual transmit queue of a device with multiple transmit queues.
+- */
+-static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
+-{
+- struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+-#ifdef CONFIG_NETPOLL_TRAP
+- if (netpoll_trap())
+- return;
+-#endif
+- if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+- __netif_schedule(txq->qdisc);
+-}
+-
+-/**
+- * netif_is_multiqueue - test if device has multiple transmit queues
+- * @dev: network device
+- *
+- * Check if device has multiple transmit queues
+- */
+-static inline int netif_is_multiqueue(const struct net_device *dev)
+-{
+- return (dev->num_tx_queues > 1);
+-}
+-
+-/* Use this variant when it is known for sure that it
+- * is executing from hardware interrupt context or with hardware interrupts
+- * disabled.
+- */
+-extern void dev_kfree_skb_irq(struct sk_buff *skb);
+-
+-/* Use this variant in places where it could be invoked
+- * from either hardware interrupt or other context, with hardware interrupts
+- * either disabled or enabled.
+- */
+-extern void dev_kfree_skb_any(struct sk_buff *skb);
+-
+-#define HAVE_NETIF_RX 1
+-extern int netif_rx(struct sk_buff *skb);
+-extern int netif_rx_ni(struct sk_buff *skb);
+-#define HAVE_NETIF_RECEIVE_SKB 1
+-extern int netif_receive_skb(struct sk_buff *skb);
+-extern void netif_nit_deliver(struct sk_buff *skb);
+-extern int dev_valid_name(const char *name);
+-extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
+-extern int dev_ethtool(struct net *net, struct ifreq *);
+-extern unsigned dev_get_flags(const struct net_device *);
+-extern int dev_change_flags(struct net_device *, unsigned);
+-extern int dev_change_name(struct net_device *, char *);
+-extern int dev_change_net_namespace(struct net_device *,
+- struct net *, const char *);
+-extern int dev_set_mtu(struct net_device *, int);
+-extern int dev_set_mac_address(struct net_device *,
+- struct sockaddr *);
+-extern int dev_hard_start_xmit(struct sk_buff *skb,
+- struct net_device *dev,
+- struct netdev_queue *txq);
+-
+-extern int netdev_budget;
+-
+-/* Called by rtnetlink.c:rtnl_unlock() */
+-extern void netdev_run_todo(void);
+-
+-/**
+- * dev_put - release reference to device
+- * @dev: network device
+- *
+- * Release reference to device to allow it to be freed.
+- */
+-static inline void dev_put(struct net_device *dev)
+-{
+- atomic_dec(&dev->refcnt);
+-}
+-
+-/**
+- * dev_hold - get reference to device
+- * @dev: network device
+- *
+- * Hold reference to device to keep it from being freed.
+- */
+-static inline void dev_hold(struct net_device *dev)
+-{
+- atomic_inc(&dev->refcnt);
+-}
+-
+-/* Carrier loss detection, dial on demand. The functions netif_carrier_on
+- * and _off may be called from IRQ context, but it is caller
+- * who is responsible for serialization of these calls.
+- *
+- * The name carrier is inappropriate, these functions should really be
+- * called netif_lowerlayer_*() because they represent the state of any
+- * kind of lower layer not just hardware media.
+- */
+-
+-extern void linkwatch_fire_event(struct net_device *dev);
+-
+-/**
+- * netif_carrier_ok - test if carrier present
+- * @dev: network device
+- *
+- * Check if carrier is present on device
+- */
+-static inline int netif_carrier_ok(const struct net_device *dev)
+-{
+- return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
+-}
+-
+-extern void __netdev_watchdog_up(struct net_device *dev);
+-
+-extern void netif_carrier_on(struct net_device *dev);
+-
+-extern void netif_carrier_off(struct net_device *dev);
+-
+-/**
+- * netif_dormant_on - mark device as dormant.
+- * @dev: network device
+- *
+- * Mark device as dormant (as per RFC2863).
+- *
+- * The dormant state indicates that the relevant interface is not
+- * actually in a condition to pass packets (i.e., it is not 'up') but is
+- * in a "pending" state, waiting for some external event. For "on-
+- * demand" interfaces, this new state identifies the situation where the
+- * interface is waiting for events to place it in the up state.
+- *
+- */
+-static inline void netif_dormant_on(struct net_device *dev)
+-{
+- if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
+- linkwatch_fire_event(dev);
+-}
+-
+-/**
+- * netif_dormant_off - set device as not dormant.
+- * @dev: network device
+- *
+- * Device is not in dormant state.
+- */
+-static inline void netif_dormant_off(struct net_device *dev)
+-{
+- if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
+- linkwatch_fire_event(dev);
+-}
+-
+-/**
+- * netif_dormant - test if carrier present
+- * @dev: network device
+- *
+- * Check if carrier is present on device
+- */
+-static inline int netif_dormant(const struct net_device *dev)
+-{
+- return test_bit(__LINK_STATE_DORMANT, &dev->state);
+-}
+-
+-
+-/**
+- * netif_oper_up - test if device is operational
+- * @dev: network device
+- *
+- * Check if carrier is operational
+- */
+-static inline int netif_oper_up(const struct net_device *dev) {
+- return (dev->operstate == IF_OPER_UP ||
+- dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
+-}
+-
+-/**
+- * netif_device_present - is device available or removed
+- * @dev: network device
+- *
+- * Check if device has not been removed from system.
+- */
+-static inline int netif_device_present(struct net_device *dev)
+-{
+- return test_bit(__LINK_STATE_PRESENT, &dev->state);
+-}
+-
+-extern void netif_device_detach(struct net_device *dev);
+-
+-extern void netif_device_attach(struct net_device *dev);
+-
+-/*
+- * Network interface message level settings
+- */
+-#define HAVE_NETIF_MSG 1
+-
+-enum {
+- NETIF_MSG_DRV = 0x0001,
+- NETIF_MSG_PROBE = 0x0002,
+- NETIF_MSG_LINK = 0x0004,
+- NETIF_MSG_TIMER = 0x0008,
+- NETIF_MSG_IFDOWN = 0x0010,
+- NETIF_MSG_IFUP = 0x0020,
+- NETIF_MSG_RX_ERR = 0x0040,
+- NETIF_MSG_TX_ERR = 0x0080,
+- NETIF_MSG_TX_QUEUED = 0x0100,
+- NETIF_MSG_INTR = 0x0200,
+- NETIF_MSG_TX_DONE = 0x0400,
+- NETIF_MSG_RX_STATUS = 0x0800,
+- NETIF_MSG_PKTDATA = 0x1000,
+- NETIF_MSG_HW = 0x2000,
+- NETIF_MSG_WOL = 0x4000,
+-};
+-
+-#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
+-#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
+-#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
+-#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
+-#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
+-#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
+-#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
+-#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
+-#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
+-#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
+-#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
+-#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
+-#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
+-#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
+-#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
+-
+-static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
+-{
+- /* use default */
+- if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
+- return default_msg_enable_bits;
+- if (debug_value == 0) /* no output */
+- return 0;
+- /* set low N bits */
+- return (1 << debug_value) - 1;
+-}
+-
+-/* Test if receive needs to be scheduled but only if up */
+-static inline int netif_rx_schedule_prep(struct net_device *dev,
+- struct napi_struct *napi)
+-{
+- return napi_schedule_prep(napi);
+-}
+-
+-/* Add interface to tail of rx poll list. This assumes that _prep has
+- * already been called and returned 1.
+- */
+-static inline void __netif_rx_schedule(struct net_device *dev,
+- struct napi_struct *napi)
+-{
+- __napi_schedule(napi);
+-}
+-
+-/* Try to reschedule poll. Called by irq handler. */
+-
+-static inline void netif_rx_schedule(struct net_device *dev,
+- struct napi_struct *napi)
+-{
+- if (netif_rx_schedule_prep(dev, napi))
+- __netif_rx_schedule(dev, napi);
+-}
+-
+-/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
+-static inline int netif_rx_reschedule(struct net_device *dev,
+- struct napi_struct *napi)
+-{
+- if (napi_schedule_prep(napi)) {
+- __netif_rx_schedule(dev, napi);
+- return 1;
+- }
+- return 0;
+-}
+-
+-/* same as netif_rx_complete, except that local_irq_save(flags)
+- * has already been issued
+- */
+-static inline void __netif_rx_complete(struct net_device *dev,
+- struct napi_struct *napi)
+-{
+- __napi_complete(napi);
+-}
+-
+-/* Remove interface from poll list: it must be in the poll list
+- * on current cpu. This primitive is called by dev->poll(), when
+- * it completes the work. The device cannot be out of poll list at this
+- * moment, it is BUG().
+- */
+-static inline void netif_rx_complete(struct net_device *dev,
+- struct napi_struct *napi)
+-{
+- unsigned long flags;
+-
+- local_irq_save(flags);
+- __netif_rx_complete(dev, napi);
+- local_irq_restore(flags);
+-}
+-
+-static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
+-{
+- spin_lock(&txq->_xmit_lock);
+- txq->xmit_lock_owner = cpu;
+-}
+-
+-static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+-{
+- spin_lock_bh(&txq->_xmit_lock);
+- txq->xmit_lock_owner = smp_processor_id();
+-}
+-
+-static inline int __netif_tx_trylock(struct netdev_queue *txq)
+-{
+- int ok = spin_trylock(&txq->_xmit_lock);
+- if (likely(ok))
+- txq->xmit_lock_owner = smp_processor_id();
+- return ok;
+-}
+-
+-static inline void __netif_tx_unlock(struct netdev_queue *txq)
+-{
+- txq->xmit_lock_owner = -1;
+- spin_unlock(&txq->_xmit_lock);
+-}
+-
+-static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+-{
+- txq->xmit_lock_owner = -1;
+- spin_unlock_bh(&txq->_xmit_lock);
+-}
+-
+-/**
+- * netif_tx_lock - grab network device transmit lock
+- * @dev: network device
+- * @cpu: cpu number of lock owner
+- *
+- * Get network device transmit lock
+- */
+-static inline void netif_tx_lock(struct net_device *dev)
+-{
+- unsigned int i;
+- int cpu;
+-
+- spin_lock(&dev->tx_global_lock);
+- cpu = smp_processor_id();
+- for (i = 0; i < dev->num_tx_queues; i++) {
+- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+-
+- /* We are the only thread of execution doing a
+- * freeze, but we have to grab the _xmit_lock in
+- * order to synchronize with threads which are in
+- * the ->hard_start_xmit() handler and already
+- * checked the frozen bit.
+- */
+- __netif_tx_lock(txq, cpu);
+- set_bit(__QUEUE_STATE_FROZEN, &txq->state);
+- __netif_tx_unlock(txq);
+- }
+-}
+-
+-static inline void netif_tx_lock_bh(struct net_device *dev)
+-{
+- local_bh_disable();
+- netif_tx_lock(dev);
+-}
+-
+-static inline void netif_tx_unlock(struct net_device *dev)
+-{
+- unsigned int i;
+-
+- for (i = 0; i < dev->num_tx_queues; i++) {
+- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+-
+- /* No need to grab the _xmit_lock here. If the
+- * queue is not stopped for another reason, we
+- * force a schedule.
+- */
+- clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
+- if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+- __netif_schedule(txq->qdisc);
+- }
+- spin_unlock(&dev->tx_global_lock);
+-}
+-
+-static inline void netif_tx_unlock_bh(struct net_device *dev)
+-{
+- netif_tx_unlock(dev);
+- local_bh_enable();
+-}
+-
+-#define HARD_TX_LOCK(dev, txq, cpu) { \
+- if ((dev->features & NETIF_F_LLTX) == 0) { \
+- __netif_tx_lock(txq, cpu); \
+- } \
+-}
+-
+-#define HARD_TX_UNLOCK(dev, txq) { \
+- if ((dev->features & NETIF_F_LLTX) == 0) { \
+- __netif_tx_unlock(txq); \
+- } \
+-}
+-
+-static inline void netif_tx_disable(struct net_device *dev)
+-{
+- unsigned int i;
+- int cpu;
+-
+- local_bh_disable();
+- cpu = smp_processor_id();
+- for (i = 0; i < dev->num_tx_queues; i++) {
+- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+-
+- __netif_tx_lock(txq, cpu);
+- netif_tx_stop_queue(txq);
+- __netif_tx_unlock(txq);
+- }
+- local_bh_enable();
+-}
+-
+-static inline void netif_addr_lock(struct net_device *dev)
+-{
+- spin_lock(&dev->addr_list_lock);
+-}
+-
+-static inline void netif_addr_lock_bh(struct net_device *dev)
+-{
+- spin_lock_bh(&dev->addr_list_lock);
+-}
+-
+-static inline void netif_addr_unlock(struct net_device *dev)
+-{
+- spin_unlock(&dev->addr_list_lock);
+-}
+-
+-static inline void netif_addr_unlock_bh(struct net_device *dev)
+-{
+- spin_unlock_bh(&dev->addr_list_lock);
+-}
+-
+-/* These functions live elsewhere (drivers/net/net_init.c, but related) */
+-
+-extern void ether_setup(struct net_device *dev);
+-
+-/* Support for loadable net-drivers */
+-extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
+- void (*setup)(struct net_device *),
+- unsigned int queue_count);
+-#define alloc_netdev(sizeof_priv, name, setup) \
+- alloc_netdev_mq(sizeof_priv, name, setup, 1)
+-extern int register_netdev(struct net_device *dev);
+-extern void unregister_netdev(struct net_device *dev);
+-/* Functions used for secondary unicast and multicast support */
+-extern void dev_set_rx_mode(struct net_device *dev);
+-extern void __dev_set_rx_mode(struct net_device *dev);
+-extern int dev_unicast_delete(struct net_device *dev, void *addr, int alen);
+-extern int dev_unicast_add(struct net_device *dev, void *addr, int alen);
+-extern int dev_unicast_sync(struct net_device *to, struct net_device *from);
+-extern void dev_unicast_unsync(struct net_device *to, struct net_device *from);
+-extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
+-extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
+-extern int dev_mc_sync(struct net_device *to, struct net_device *from);
+-extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
+-extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
+-extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
+-extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
+-extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
+-extern int dev_set_promiscuity(struct net_device *dev, int inc);
+-extern int dev_set_allmulti(struct net_device *dev, int inc);
+-extern void netdev_state_change(struct net_device *dev);
+-extern void netdev_bonding_change(struct net_device *dev);
+-extern void netdev_features_change(struct net_device *dev);
+-/* Load a device via the kmod */
+-extern void dev_load(struct net *net, const char *name);
+-extern void dev_mcast_init(void);
+-extern int netdev_max_backlog;
+-extern int weight_p;
+-extern int netdev_set_master(struct net_device *dev, struct net_device *master);
+-extern int skb_checksum_help(struct sk_buff *skb);
+-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
+-#ifdef CONFIG_BUG
+-extern void netdev_rx_csum_fault(struct net_device *dev);
+-#else
+-static inline void netdev_rx_csum_fault(struct net_device *dev)
+-{
+-}
+-#endif
+-/* rx skb timestamps */
+-extern void net_enable_timestamp(void);
+-extern void net_disable_timestamp(void);
+-
+-#ifdef CONFIG_PROC_FS
+-extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
+-extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
+-extern void dev_seq_stop(struct seq_file *seq, void *v);
+-#endif
+-
+-extern int netdev_class_create_file(struct class_attribute *class_attr);
+-extern void netdev_class_remove_file(struct class_attribute *class_attr);
+-
+-extern char *netdev_drivername(struct net_device *dev, char *buffer, int len);
+-
+-extern void linkwatch_run_queue(void);
+-
+-extern int netdev_compute_features(unsigned long all, unsigned long one);
+-
+-static inline int net_gso_ok(int features, int gso_type)
+-{
+- int feature = gso_type << NETIF_F_GSO_SHIFT;
+- return (features & feature) == feature;
+-}
+-
+-static inline int skb_gso_ok(struct sk_buff *skb, int features)
+-{
+- return net_gso_ok(features, skb_shinfo(skb)->gso_type);
+-}
+-
+-static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+-{
+- return skb_is_gso(skb) &&
+- (!skb_gso_ok(skb, dev->features) ||
+- unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
+-}
+-
+-static inline void netif_set_gso_max_size(struct net_device *dev,
+- unsigned int size)
+-{
+- dev->gso_max_size = size;
+-}
+-
+-/* On bonding slaves other than the currently active slave, suppress
+- * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
+- * ARP on active-backup slaves with arp_validate enabled.
+- */
+-static inline int skb_bond_should_drop(struct sk_buff *skb)
+-{
+- struct net_device *dev = skb->dev;
+- struct net_device *master = dev->master;
+-
+- if (master &&
+- (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
+- if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
+- skb->protocol == __constant_htons(ETH_P_ARP))
+- return 0;
+-
+- if (master->priv_flags & IFF_MASTER_ALB) {
+- if (skb->pkt_type != PACKET_BROADCAST &&
+- skb->pkt_type != PACKET_MULTICAST)
+- return 0;
+- }
+- if (master->priv_flags & IFF_MASTER_8023AD &&
+- skb->protocol == __constant_htons(ETH_P_SLOW))
+- return 0;
+-
+- return 1;
+- }
+- return 0;
+-}
+-
+-#endif /* __KERNEL__ */
+-
+-#endif /* _LINUX_DEV_H */
+diff -Nurb linux-2.6.27-720/include/linux/netlink.h linux-2.6.27-710/include/linux/netlink.h
+--- linux-2.6.27-720/include/linux/netlink.h 2009-05-04 12:18:34.000000000 -0400
++++ linux-2.6.27-710/include/linux/netlink.h 2008-10-09 18:13:53.000000000 -0400
+@@ -242,7 +242,7 @@
+ nlh->nlmsg_flags = flags;
+ nlh->nlmsg_pid = pid;
+ nlh->nlmsg_seq = seq;
+- memset((char*) NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
++ memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
+ return nlh;
+ }
+
+diff -Nurb linux-2.6.27-720/include/linux/page-flags.h linux-2.6.27-710/include/linux/page-flags.h
+--- linux-2.6.27-720/include/linux/page-flags.h 2009-05-04 12:18:34.000000000 -0400
++++ linux-2.6.27-710/include/linux/page-flags.h 2008-10-09 18:13:53.000000000 -0400
+@@ -174,10 +174,8 @@
+ PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
+ PAGEFLAG(SavePinned, savepinned); /* Xen */
+ PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
+-#ifndef __cplusplus
+ PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
+ __SETPAGEFLAG(Private, private)
+-#endif
+
+ __PAGEFLAG(SlobPage, slob_page)
+ __PAGEFLAG(SlobFree, slob_free)
+diff -Nurb linux-2.6.27-720/include/linux/prefetch.h linux-2.6.27-710/include/linux/prefetch.h
+--- linux-2.6.27-720/include/linux/prefetch.h 2009-05-04 12:18:34.000000000 -0400
++++ linux-2.6.27-710/include/linux/prefetch.h 2008-10-09 18:13:53.000000000 -0400
+@@ -54,9 +54,9 @@
+ {
+ #ifdef ARCH_HAS_PREFETCH
+ char *cp;
+- char *end = (char*)(addr) + len;
++ char *end = addr + len;