-diff -Nurb linux-2.6.27-720/include/linux/sched.h.orig linux-2.6.27-710/include/linux/sched.h.orig
---- linux-2.6.27-720/include/linux/sched.h.orig 2009-05-04 12:15:13.000000000 -0400
-+++ linux-2.6.27-710/include/linux/sched.h.orig 1969-12-31 19:00:00.000000000 -0500
-@@ -1,2244 +0,0 @@
--#ifndef _LINUX_SCHED_H
--#define _LINUX_SCHED_H
--
--/*
-- * cloning flags:
-- */
--#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */
--#define CLONE_VM 0x00000100 /* set if VM shared between processes */
--#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
--#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
--#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */
--#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */
--#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */
--#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */
--#define CLONE_THREAD 0x00010000 /* Same thread group? */
--#define CLONE_NEWNS 0x00020000 /* New namespace group? */
--#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */
--#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */
--#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */
--#define CLONE_CHILD_CLEARTID 0x00200000 /* clear the TID in the child */
--#define CLONE_DETACHED 0x00400000 /* Unused, ignored */
--#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */
--#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */
--#define CLONE_STOPPED 0x02000000 /* Start in stopped state */
--#define CLONE_NEWUTS 0x04000000 /* New utsname group? */
--#define CLONE_NEWIPC 0x08000000 /* New ipcs */
--#define CLONE_NEWUSER 0x10000000 /* New user namespace */
--#define CLONE_NEWPID 0x20000000 /* New pid namespace */
--#define CLONE_NEWNET 0x40000000 /* New network namespace */
--#define CLONE_IO 0x80000000 /* Clone io context */
--
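[Editor's sketch, not part of the patch: the flags above are OR-ed together in the clone(2) call. A rough userspace illustration, assuming glibc's clone() wrapper and the CLONE_* constants from <sched.h>, of the thread-like combination NPTL approximately uses:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Illustrative only: a thread-like child sharing VM, fs info, open
 * files and signal handlers with its creator. */
static int child_fn(void *arg)
{
        printf("child sees arg=%s\n", (const char *)arg);
        return 0;
}

int main(void)
{
        const int flags = CLONE_VM | CLONE_FS | CLONE_FILES |
                          CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM;
        char *stack = malloc(64 * 1024);

        if (!stack)
                return 1;
        /* glibc's clone() takes the *top* of the child's stack on x86. */
        if (clone(child_fn, stack + 64 * 1024, flags, "hello") == -1)
                perror("clone");
        sleep(1);               /* crude: let the child run before exiting */
        return 0;
}

CLONE_THREAD requires CLONE_SIGHAND, which in turn requires CLONE_VM; the kernel rejects inconsistent combinations with -EINVAL.]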
--/*
-- * Scheduling policies
-- */
--#define SCHED_NORMAL 0
--#define SCHED_FIFO 1
--#define SCHED_RR 2
--#define SCHED_BATCH 3
--/* SCHED_ISO: reserved but not implemented yet */
--#define SCHED_IDLE 5
--
--#ifdef __KERNEL__
--
--struct sched_param {
-- int sched_priority;
--};
--
--#include <asm/param.h> /* for HZ */
--
--#include <linux/capability.h>
--#include <linux/threads.h>
--#include <linux/kernel.h>
--#include <linux/types.h>
--#include <linux/timex.h>
--#include <linux/jiffies.h>
--#include <linux/rbtree.h>
--#include <linux/thread_info.h>
--#include <linux/cpumask.h>
--#include <linux/errno.h>
--#include <linux/nodemask.h>
--#include <linux/mm_types.h>
--
--#include <asm/system.h>
--#include <asm/page.h>
--#include <asm/ptrace.h>
--#include <asm/cputime.h>
--
--#include <linux/smp.h>
--#include <linux/sem.h>
--#include <linux/signal.h>
--#include <linux/fs_struct.h>
--#include <linux/compiler.h>
--#include <linux/completion.h>
--#include <linux/percpu.h>
--#include <linux/topology.h>
--#include <linux/proportions.h>
--#include <linux/seccomp.h>
--#include <linux/rcupdate.h>
--#include <linux/rtmutex.h>
--
--#include <linux/time.h>
--#include <linux/param.h>
--#include <linux/resource.h>
--#include <linux/timer.h>
--#include <linux/hrtimer.h>
--#include <linux/task_io_accounting.h>
--#include <linux/kobject.h>
--#include <linux/latencytop.h>
--#include <linux/cred.h>
--#include <linux/pid.h>
--
--#include <asm/processor.h>
--
--struct mem_cgroup;
--struct exec_domain;
--struct futex_pi_state;
--struct robust_list_head;
--struct bio;
--
--/*
-- * List of flags we want to share for kernel threads,
-- * if only because they are not used by them anyway.
-- */
--#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
--
--/*
-- * These are the constants used to fake the fixed-point load-average
-- * counting. Some notes:
-- * - 11 bit fractions expand to 22 bits by the multiplies: this gives
-- * a load-average precision of 10 bits integer + 11 bits fractional
-- * - if you want to count load-averages more often, you need more
-- * precision, or rounding will get you. With 2-second counting freq,
-- * the EXP_n values would be 1981, 2034 and 2043 if still using only
-- * 11 bit fractions.
-- */
--extern unsigned long avenrun[]; /* Load averages */
--
--#define FSHIFT 11 /* nr of bits of precision */
--#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
--#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */
--#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
--#define EXP_5 2014 /* 1/exp(5sec/5min) */
--#define EXP_15 2037 /* 1/exp(5sec/15min) */
--
--#define CALC_LOAD(load,exp,n) \
-- load *= exp; \
-- load += n*(FIXED_1-exp); \
-- load >>= FSHIFT;
--
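[Editor's sketch, not part of the patch: a minimal userspace replica of CALC_LOAD using the header's own constants (FIXED_1 = 2048, EXP_1 = 1884) as a sanity check on the fixed-point arithmetic described above. With a constant instantaneous load n, the average converges to n. Note the macro body is three bare statements (no do/while wrapper), so a loop body using it needs braces:

#include <stdio.h>

#define FSHIFT   11
#define FIXED_1  (1 << FSHIFT)
#define EXP_1    1884                   /* 1/exp(5sec/1min) in fixed point */

#define CALC_LOAD(load, exp, n) \
        load *= exp;            \
        load += n * (FIXED_1 - exp); \
        load >>= FSHIFT;

int main(void)
{
        unsigned long avg = 0;
        unsigned long n = 3 * FIXED_1;          /* 3 runnable tasks */
        int i;

        for (i = 0; i < 60; i++) {              /* 60 ticks of ~5 s = 5 min */
                CALC_LOAD(avg, EXP_1, n);
        }
        /* Prints roughly 2.96: after 5 minutes the 1-min average has
         * converged to within e^-5 of the true load of 3. */
        printf("1-min load after 5 min at n=3: %lu.%02lu\n",
               avg >> FSHIFT, ((avg & (FIXED_1 - 1)) * 100) >> FSHIFT);
        return 0;
}]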
--extern unsigned long total_forks;
--extern int nr_threads;
--DECLARE_PER_CPU(unsigned long, process_counts);
--extern int nr_processes(void);
--extern unsigned long nr_running(void);
--extern unsigned long nr_uninterruptible(void);
--extern unsigned long nr_active(void);
--extern unsigned long nr_iowait(void);
--
--struct seq_file;
--struct cfs_rq;
--struct task_group;
--#ifdef CONFIG_SCHED_DEBUG
--extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
--extern void proc_sched_set_task(struct task_struct *p);
--extern void
--print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
--#else
--static inline void
--proc_sched_show_task(struct task_struct *p, struct seq_file *m)
--{
--}
--static inline void proc_sched_set_task(struct task_struct *p)
--{
--}
--static inline void
--print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
--{
--}
--#endif
--
--extern unsigned long long time_sync_thresh;
--
--/*
-- * Task state bitmask. NOTE! These bits are also
-- * encoded in fs/proc/array.c: get_task_state().
-- *
-- * We have two separate sets of flags: task->state
-- * is about runnability, while task->exit_state are
-- * about the task exiting. Confusing, but this way
-- * modifying one set can't modify the other one by
-- * mistake.
-- */
--#define TASK_RUNNING 0
--#define TASK_INTERRUPTIBLE 1
--#define TASK_UNINTERRUPTIBLE 2
--#define __TASK_STOPPED 4
--#define __TASK_TRACED 8
--/* in tsk->exit_state */
--#define EXIT_ZOMBIE 16
--#define EXIT_DEAD 32
--/* in tsk->state again */
--#define TASK_DEAD 64
--#define TASK_WAKEKILL 128
--
--/* Convenience macros for the sake of set_task_state */
--#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
--#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
--#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
--
--/* Convenience macros for the sake of wake_up */
--#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
--#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
--
--/* get_task_state() */
--#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
-- TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
-- __TASK_TRACED)
--
--#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
--#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
--#define task_is_stopped_or_traced(task) \
-- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
--#define task_contributes_to_load(task) \
-- ((task->state & TASK_UNINTERRUPTIBLE) != 0)
--
--#define __set_task_state(tsk, state_value) \
-- do { (tsk)->state = (state_value); } while (0)
--#define set_task_state(tsk, state_value) \
-- set_mb((tsk)->state, (state_value))
--
--/*
-- * set_current_state() includes a barrier so that the write of current->state
-- * is correctly serialised wrt the caller's subsequent test of whether to
-- * actually sleep:
-- *
-- * set_current_state(TASK_UNINTERRUPTIBLE);
-- * if (do_i_need_to_sleep())
-- * schedule();
-- *
-- * If the caller does not need such serialisation then use __set_current_state()
-- */
--#define __set_current_state(state_value) \
-- do { current->state = (state_value); } while (0)
--#define set_current_state(state_value) \
-- set_mb(current->state, (state_value))
--
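[Editor's sketch, not part of the patch: the canonical sleep loop the comment above describes, kernel context assumed. event_pending() is a hypothetical per-caller predicate; the waker sets the condition and calls wake_up_process(). The barrier in set_current_state() orders the state write before the condition re-check, closing the lost-wakeup window:

static int wait_for_event(void)
{
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE); /* write + barrier */
                if (event_pending())                   /* re-check condition */
                        break;
                if (signal_pending(current)) {
                        __set_current_state(TASK_RUNNING);
                        return -ERESTARTSYS;
                }
                schedule();                            /* actually sleep */
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}]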
--/* Task command name length */
--#define TASK_COMM_LEN 16
--
--#include <linux/spinlock.h>
--
--/*
-- * This serializes "schedule()" and also protects
-- * the run-queue from deletions/modifications (but
-- * _adding_ to the beginning of the run-queue has
-- * a separate lock).
-- */
--extern rwlock_t tasklist_lock;
--extern spinlock_t mmlist_lock;
--
--struct task_struct;
--
--extern void sched_init(void);
--extern void sched_init_smp(void);
--extern asmlinkage void schedule_tail(struct task_struct *prev);
--extern void init_idle(struct task_struct *idle, int cpu);
--extern void init_idle_bootup_task(struct task_struct *idle);
--
--extern int runqueue_is_locked(void);
--
--extern cpumask_t nohz_cpu_mask;
--#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
--extern int select_nohz_load_balancer(int cpu);
--#else
--static inline int select_nohz_load_balancer(int cpu)
--{
-- return 0;
--}
--#endif
--
--extern unsigned long rt_needs_cpu(int cpu);
--
--/*
-- * Only dump TASK_* tasks. (0 for all tasks)
-- */
--extern void show_state_filter(unsigned long state_filter);
--
--static inline void show_state(void)
--{
-- show_state_filter(0);
--}
--
--extern void show_regs(struct pt_regs *);
--
--/*
-- * TASK is a pointer to the task whose backtrace we want to see (or NULL
-- * for the current task), SP is the stack pointer of the first frame that
-- * should be shown in the backtrace (or NULL if the entire call-chain of
-- * the task should be shown).
-- */
--extern void show_stack(struct task_struct *task, unsigned long *sp);
--
--void io_schedule(void);
--long io_schedule_timeout(long timeout);
--
--extern void cpu_init (void);
--extern void trap_init(void);
--extern void account_process_tick(struct task_struct *task, int user);
--extern void update_process_times(int user);
--extern void scheduler_tick(void);
--extern void hrtick_resched(void);
--
--extern void sched_show_task(struct task_struct *p);
--
--#ifdef CONFIG_DETECT_SOFTLOCKUP
--extern void softlockup_tick(void);
--extern void touch_softlockup_watchdog(void);
--extern void touch_all_softlockup_watchdogs(void);
--extern unsigned int softlockup_panic;
--extern unsigned long sysctl_hung_task_check_count;
--extern unsigned long sysctl_hung_task_timeout_secs;
--extern unsigned long sysctl_hung_task_warnings;
--extern int softlockup_thresh;
--#else
--static inline void softlockup_tick(void)
--{
--}
--static inline void spawn_softlockup_task(void)
--{
--}
--static inline void touch_softlockup_watchdog(void)
--{
--}
--static inline void touch_all_softlockup_watchdogs(void)
--{
--}
--#endif
--
--
--/* Attach to any functions which should be ignored in wchan output. */
--#define __sched __attribute__((__section__(".sched.text")))
--
--/* Linker adds these: start and end of __sched functions */
--extern char __sched_text_start[], __sched_text_end[];
--
--/* Is this address in the __sched functions? */
--extern int in_sched_functions(unsigned long addr);
--
--#define MAX_SCHEDULE_TIMEOUT LONG_MAX
--extern signed long schedule_timeout(signed long timeout);
--extern signed long schedule_timeout_interruptible(signed long timeout);
--extern signed long schedule_timeout_killable(signed long timeout);
--extern signed long schedule_timeout_uninterruptible(signed long timeout);
--asmlinkage void schedule(void);
--
--struct nsproxy;
--struct user_namespace;
--
--/* Maximum number of active map areas. This is a random (large) number */
--#define DEFAULT_MAX_MAP_COUNT 65536
--
--extern int sysctl_max_map_count;
--
--#include <linux/aio.h>
--
--extern unsigned long
--arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
-- unsigned long, unsigned long);
--extern unsigned long
--arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
-- unsigned long len, unsigned long pgoff,
-- unsigned long flags);
--extern void arch_unmap_area(struct mm_struct *, unsigned long);
--extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
--
--#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
--/*
-- * The mm counters are not protected by its page_table_lock,
-- * so must be incremented atomically.
-- */
--#define __set_mm_counter(mm, member, value) \
-- atomic_long_set(&(mm)->_##member, value)
--#define get_mm_counter(mm, member) \
-- ((unsigned long)atomic_long_read(&(mm)->_##member))
--#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
--/*
-- * The mm counters are protected by its page_table_lock,
-- * so can be incremented directly.
-- */
--#define __set_mm_counter(mm, member, value) (mm)->_##member = (value)
--#define get_mm_counter(mm, member) ((mm)->_##member)
--
--#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
--
--#define set_mm_counter(mm, member, value) \
-- vx_ ## member ## pages_sub((mm), (get_mm_counter(mm, member) - value))
--#define add_mm_counter(mm, member, value) \
-- vx_ ## member ## pages_add((mm), (value))
--#define inc_mm_counter(mm, member) vx_ ## member ## pages_inc((mm))
--#define dec_mm_counter(mm, member) vx_ ## member ## pages_dec((mm))
--
--#define get_mm_rss(mm) \
-- (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
--#define update_hiwater_rss(mm) do { \
-- unsigned long _rss = get_mm_rss(mm); \
-- if ((mm)->hiwater_rss < _rss) \
-- (mm)->hiwater_rss = _rss; \
--} while (0)
--#define update_hiwater_vm(mm) do { \
-- if ((mm)->hiwater_vm < (mm)->total_vm) \
-- (mm)->hiwater_vm = (mm)->total_vm; \
--} while (0)
--
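[Editor's sketch, not part of the patch: how a fault path would use these wrappers, kernel context assumed. The vx_*pages_* helpers the macros paste together come from the VServer patch and are not shown in this header:

/* Charge one new anonymous page to an mm, then refresh the RSS
 * high-water mark. */
static void account_new_anon_page(struct mm_struct *mm)
{
        inc_mm_counter(mm, anon_rss);   /* pastes to a vx_*pages_inc() helper */
        update_hiwater_rss(mm);         /* hiwater_rss = max(hiwater_rss, rss) */
}]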
--extern void set_dumpable(struct mm_struct *mm, int value);
--extern int get_dumpable(struct mm_struct *mm);
--
--/* mm flags */
--/* dumpable bits */
--#define MMF_DUMPABLE 0 /* core dump is permitted */
--#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */
--#define MMF_DUMPABLE_BITS 2
--
--/* coredump filter bits */
--#define MMF_DUMP_ANON_PRIVATE 2
--#define MMF_DUMP_ANON_SHARED 3
--#define MMF_DUMP_MAPPED_PRIVATE 4
--#define MMF_DUMP_MAPPED_SHARED 5
--#define MMF_DUMP_ELF_HEADERS 6
--#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
--#define MMF_DUMP_FILTER_BITS 5
--#define MMF_DUMP_FILTER_MASK \
-- (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
--#define MMF_DUMP_FILTER_DEFAULT \
-- ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED))
--
--struct sighand_struct {
-- atomic_t count;
-- struct k_sigaction action[_NSIG];
-- spinlock_t siglock;
-- wait_queue_head_t signalfd_wqh;
--};
--
--struct pacct_struct {
-- int ac_flag;
-- long ac_exitcode;
-- unsigned long ac_mem;
-- cputime_t ac_utime, ac_stime;
-- unsigned long ac_minflt, ac_majflt;
--};
--
--/*
-- * NOTE! "signal_struct" does not have it's own
-- * locking, because a shared signal_struct always
-- * implies a shared sighand_struct, so locking
-- * sighand_struct is always a proper superset of
-- * the locking of signal_struct.
-- */
--struct signal_struct {
-- atomic_t count;
-- atomic_t live;
--
-- wait_queue_head_t wait_chldexit; /* for wait4() */
--
-- /* current thread group signal load-balancing target: */
-- struct task_struct *curr_target;
--
-- /* shared signal handling: */
-- struct sigpending shared_pending;
--
-- /* thread group exit support */
-- int group_exit_code;
-- /* overloaded:
-- * - notify group_exit_task when ->count is equal to notify_count
-- * - everyone except group_exit_task is stopped during signal delivery
-- * of fatal signals, group_exit_task processes the signal.
-- */
-- struct task_struct *group_exit_task;
-- int notify_count;
--
-- /* thread group stop support, overloads group_exit_code too */
-- int group_stop_count;
-- unsigned int flags; /* see SIGNAL_* flags below */
--
-- /* POSIX.1b Interval Timers */
-- struct list_head posix_timers;
--
-- /* ITIMER_REAL timer for the process */
-- struct hrtimer real_timer;
-- struct pid *leader_pid;
-- ktime_t it_real_incr;
--
-- /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
-- cputime_t it_prof_expires, it_virt_expires;
-- cputime_t it_prof_incr, it_virt_incr;
--
-- /* job control IDs */
--
-- /*
-- * pgrp and session fields are deprecated.
-- * use the task_session_Xnr and task_pgrp_Xnr routines below
-- */
--
-- union {
-- pid_t pgrp __deprecated;
-- pid_t __pgrp;
-- };
--
-- struct pid *tty_old_pgrp;
--
-- union {
-- pid_t session __deprecated;
-- pid_t __session;
-- };
--
-- /* boolean value for session group leader */
-- int leader;
--
-- struct tty_struct *tty; /* NULL if no tty */
--
-- /*
-- * Cumulative resource counters for dead threads in the group,
-- * and for reaped dead child processes forked by this group.
-- * Live threads maintain their own counters and add to these
-- * in __exit_signal, except for the group leader.
-- */
-- cputime_t utime, stime, cutime, cstime;
-- cputime_t gtime;
-- cputime_t cgtime;
-- unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
-- unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
-- unsigned long inblock, oublock, cinblock, coublock;
-- struct task_io_accounting ioac;
--
-- /*
-- * Cumulative ns of scheduled CPU time for dead threads in the
-- * group, not including a zombie group leader. (This only differs
-- * from jiffies_to_ns(utime + stime) if sched_clock uses something
-- * other than jiffies.)
-- */
-- unsigned long long sum_sched_runtime;
--
-- /*
-- * We don't bother to synchronize most readers of this at all,
-- * because there is no reader checking a limit that actually needs
-- * to get both rlim_cur and rlim_max atomically, and either one
-- * alone is a single word that can safely be read normally.
-- * getrlimit/setrlimit use task_lock(current->group_leader) to
-- * protect this instead of the siglock, because they really
-- * have no need to disable irqs.
-- */
-- struct rlimit rlim[RLIM_NLIMITS];
--
-- struct list_head cpu_timers[3];
--
-- /* keep the process-shared keyrings here so that they do the right
-- * thing in threads created with CLONE_THREAD */
--#ifdef CONFIG_KEYS
-- struct key *session_keyring; /* keyring inherited over fork */
-- struct key *process_keyring; /* keyring private to this process */
--#endif
--#ifdef CONFIG_BSD_PROCESS_ACCT
-- struct pacct_struct pacct; /* per-process accounting information */
--#endif
--#ifdef CONFIG_TASKSTATS
-- struct taskstats *stats;
--#endif
--#ifdef CONFIG_AUDIT
-- unsigned audit_tty;
-- struct tty_audit_buf *tty_audit_buf;
--#endif
--};
--
--/* Context switch must be unlocked if interrupts are to be enabled */
--#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
--# define __ARCH_WANT_UNLOCKED_CTXSW
--#endif
--
--/*
-- * Bits in flags field of signal_struct.
-- */
--#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
--#define SIGNAL_STOP_DEQUEUED 0x00000002 /* stop signal dequeued */
--#define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */
--#define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress */
--/*
-- * Pending notifications to parent.
-- */
--#define SIGNAL_CLD_STOPPED 0x00000010
--#define SIGNAL_CLD_CONTINUED 0x00000020
--#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
--
--#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
--
--/* If true, all threads except ->group_exit_task have pending SIGKILL */
--static inline int signal_group_exit(const struct signal_struct *sig)
--{
-- return (sig->flags & SIGNAL_GROUP_EXIT) ||
-- (sig->group_exit_task != NULL);
--}
--
--/*
-- * Some day this will be a full-fledged user tracking system.
-- */
--struct user_struct {
-- atomic_t __count; /* reference count */
-- atomic_t processes; /* How many processes does this user have? */
-- atomic_t files; /* How many open files does this user have? */
-- atomic_t sigpending; /* How many pending signals does this user have? */
--#ifdef CONFIG_INOTIFY_USER
-- atomic_t inotify_watches; /* How many inotify watches does this user have? */
-- atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
--#endif
--#ifdef CONFIG_EPOLL
-- atomic_t epoll_watches; /* The number of file descriptors currently watched */
--#endif
--#ifdef CONFIG_POSIX_MQUEUE
-- /* protected by mq_lock */
-- unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
--#endif
-- unsigned long locked_shm; /* How many pages of mlocked shm ? */
--
--#ifdef CONFIG_KEYS
-- struct key *uid_keyring; /* UID specific keyring */
-- struct key *session_keyring; /* UID's default session keyring */
--#endif
--
-- /* Hash table maintenance information */
-- struct hlist_node uidhash_node;
-- uid_t uid;
--
--#ifdef CONFIG_USER_SCHED
-- struct task_group *tg;
--#ifdef CONFIG_SYSFS
-- struct kobject kobj;
-- struct work_struct work;
--#endif
--#endif
--};
--
--extern int uids_sysfs_init(void);
--
--extern struct user_struct *find_user(uid_t);
--
--extern struct user_struct root_user;
--#define INIT_USER (&root_user)
--
--struct backing_dev_info;
--struct reclaim_state;
--
--#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
--struct sched_info {
-- /* cumulative counters */
-- unsigned long pcount; /* # of times run on this cpu */
-- unsigned long long cpu_time, /* time spent on the cpu */
-- run_delay; /* time spent waiting on a runqueue */
--
-- /* timestamps */
-- unsigned long long last_arrival,/* when we last ran on a cpu */
-- last_queued; /* when we were last queued to run */
--#ifdef CONFIG_SCHEDSTATS
-- /* BKL stats */
-- unsigned int bkl_count;
--#endif
--};
--#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
--
--#ifdef CONFIG_SCHEDSTATS
--extern const struct file_operations proc_schedstat_operations;
--#endif /* CONFIG_SCHEDSTATS */
--
--#ifdef CONFIG_TASK_DELAY_ACCT
--struct task_delay_info {
-- spinlock_t lock;
-- unsigned int flags; /* Private per-task flags */
--
-- /* For each stat XXX, add following, aligned appropriately
-- *
-- * struct timespec XXX_start, XXX_end;
-- * u64 XXX_delay;
-- * u32 XXX_count;
-- *
-- * Atomicity of updates to XXX_delay, XXX_count protected by
-- * single lock above (split into XXX_lock if contention is an issue).
-- */
--
-- /*
-- * XXX_count is incremented on every XXX operation, the delay
-- * associated with the operation is added to XXX_delay.
-- * XXX_delay contains the accumulated delay time in nanoseconds.
-- */
-- struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
-- u64 blkio_delay; /* wait for sync block io completion */
-- u64 swapin_delay; /* wait for swapin block io completion */
-- u32 blkio_count; /* total count of the number of sync block */
-- /* io operations performed */
-- u32 swapin_count; /* total count of the number of swapin block */
-- /* io operations performed */
--
-- struct timespec freepages_start, freepages_end;
-- u64 freepages_delay; /* wait for memory reclaim */
-- u32 freepages_count; /* total count of memory reclaim */
--};
--#endif /* CONFIG_TASK_DELAY_ACCT */
--
--static inline int sched_info_on(void)
--{
--#ifdef CONFIG_SCHEDSTATS
-- return 1;
--#elif defined(CONFIG_TASK_DELAY_ACCT)
-- extern int delayacct_on;
-- return delayacct_on;
--#else
-- return 0;
--#endif
--}
--
--enum cpu_idle_type {
-- CPU_IDLE,
-- CPU_NOT_IDLE,
-- CPU_NEWLY_IDLE,
-- CPU_MAX_IDLE_TYPES
--};
--
--/*
-- * sched-domains (multiprocessor balancing) declarations:
-- */
--
--/*
-- * Increase resolution of nice-level calculations:
-- */
--#define SCHED_LOAD_SHIFT 10
--#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
--
--#define SCHED_LOAD_SCALE_FUZZ SCHED_LOAD_SCALE
--
--#ifdef CONFIG_SMP
--#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
--#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */
--#define SD_BALANCE_EXEC 4 /* Balance on exec */
--#define SD_BALANCE_FORK 8 /* Balance on fork, clone */
--#define SD_WAKE_IDLE 16 /* Wake to idle CPU on task wakeup */
--#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */
--#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */
--#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */
--#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
--#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */
--#define SD_SERIALIZE 1024 /* Only a single load balancing instance */
--#define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */
--
--#define BALANCE_FOR_MC_POWER \
-- (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
--
--#define BALANCE_FOR_PKG_POWER \
-- ((sched_mc_power_savings || sched_smt_power_savings) ? \
-- SD_POWERSAVINGS_BALANCE : 0)
--
--#define test_sd_parent(sd, flag) ((sd->parent && \
-- (sd->parent->flags & flag)) ? 1 : 0)
--
--
--struct sched_group {
-- struct sched_group *next; /* Must be a circular list */
-- cpumask_t cpumask;
--
-- /*
-- * CPU power of this group, SCHED_LOAD_SCALE being max power for a
-- * single CPU. This is read only (except for setup, hotplug CPU).
-- * Note: Never change cpu_power without recomputing its reciprocal
-- */
-- unsigned int __cpu_power;
-- /*
-- * reciprocal value of cpu_power to avoid expensive divides
-- * (see include/linux/reciprocal_div.h)
-- */
-- u32 reciprocal_cpu_power;
--};
--
--enum sched_domain_level {
-- SD_LV_NONE = 0,
-- SD_LV_SIBLING,
-- SD_LV_MC,
-- SD_LV_CPU,
-- SD_LV_NODE,
-- SD_LV_ALLNODES,
-- SD_LV_MAX
--};
--
--struct sched_domain_attr {
-- int relax_domain_level;
--};
--
--#define SD_ATTR_INIT (struct sched_domain_attr) { \
-- .relax_domain_level = -1, \
--}
--
--struct sched_domain {
-- /* These fields must be setup */
-- struct sched_domain *parent; /* top domain must be null terminated */
-- struct sched_domain *child; /* bottom domain must be null terminated */
-- struct sched_group *groups; /* the balancing groups of the domain */
-- cpumask_t span; /* span of all CPUs in this domain */
-- unsigned long min_interval; /* Minimum balance interval ms */
-- unsigned long max_interval; /* Maximum balance interval ms */
-- unsigned int busy_factor; /* less balancing by factor if busy */
-- unsigned int imbalance_pct; /* No balance until over watermark */
-- unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
-- unsigned int busy_idx;
-- unsigned int idle_idx;
-- unsigned int newidle_idx;
-- unsigned int wake_idx;
-- unsigned int forkexec_idx;
-- int flags; /* See SD_* */
-- enum sched_domain_level level;
--
-- /* Runtime fields. */
-- unsigned long last_balance; /* init to jiffies. units in jiffies */
-- unsigned int balance_interval; /* initialise to 1. units in ms. */
-- unsigned int nr_balance_failed; /* initialise to 0 */
--
-- u64 last_update;
--
--#ifdef CONFIG_SCHEDSTATS
-- /* load_balance() stats */
-- unsigned int lb_count[CPU_MAX_IDLE_TYPES];
-- unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
-- unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
-- unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
-- unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
-- unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
-- unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
-- unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
--
-- /* Active load balancing */
-- unsigned int alb_count;
-- unsigned int alb_failed;
-- unsigned int alb_pushed;
--
-- /* SD_BALANCE_EXEC stats */
-- unsigned int sbe_count;
-- unsigned int sbe_balanced;
-- unsigned int sbe_pushed;
--
-- /* SD_BALANCE_FORK stats */
-- unsigned int sbf_count;
-- unsigned int sbf_balanced;
-- unsigned int sbf_pushed;
--
-- /* try_to_wake_up() stats */
-- unsigned int ttwu_wake_remote;
-- unsigned int ttwu_move_affine;
-- unsigned int ttwu_move_balance;
--#endif
--};
--
--extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
-- struct sched_domain_attr *dattr_new);
--extern int arch_reinit_sched_domains(void);
--
--#else /* CONFIG_SMP */
--
--struct sched_domain_attr;
--
--static inline void
--partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
-- struct sched_domain_attr *dattr_new)
--{
--}
--#endif /* !CONFIG_SMP */
--
--struct io_context; /* See blkdev.h */
--#define NGROUPS_SMALL 32
--#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
--struct group_info {
-- int ngroups;
-- atomic_t usage;
-- gid_t small_block[NGROUPS_SMALL];
-- int nblocks;
-- gid_t *blocks[0];
--};
--
--/*
-- * get_group_info() must be called with the owning task locked (via task_lock())
-- * when task != current, because the vast majority of callers are
-- * looking at current->group_info, which cannot be changed except by the
-- * current task. Changing current->group_info requires the task lock, too.
-- */
--#define get_group_info(group_info) do { \
-- atomic_inc(&(group_info)->usage); \
--} while (0)
--
--#define put_group_info(group_info) do { \
-- if (atomic_dec_and_test(&(group_info)->usage)) \
-- groups_free(group_info); \
--} while (0)
--
--extern struct group_info *groups_alloc(int gidsetsize);
--extern void groups_free(struct group_info *group_info);
--extern int set_current_groups(struct group_info *group_info);
--extern int groups_search(struct group_info *group_info, gid_t grp);
--/* access the groups "array" with this macro */
--#define GROUP_AT(gi, i) \
-- ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
--
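[Editor's sketch, not part of the patch: walking the two-level group array via GROUP_AT(), kernel context assumed. The real groups_search() is a binary search over the same layout; this linear version is only for illustration:

static int in_groups_linear(struct group_info *gi, gid_t grp)
{
        int i;

        /* GROUP_AT() flattens blocks[] (small_block plus any overflow
         * blocks) into one logical array of ngroups gids. */
        for (i = 0; i < gi->ngroups; i++)
                if (GROUP_AT(gi, i) == grp)
                        return 1;
        return 0;
}]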
--#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
--extern void prefetch_stack(struct task_struct *t);
--#else
--static inline void prefetch_stack(struct task_struct *t) { }
--#endif
--
--struct audit_context; /* See audit.c */
--struct mempolicy;
--struct pipe_inode_info;
--struct uts_namespace;
--
--struct rq;
--struct sched_domain;
--
--struct sched_class {
-- const struct sched_class *next;
--
-- void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
-- void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
-- void (*yield_task) (struct rq *rq);
-- int (*select_task_rq)(struct task_struct *p, int sync);
--
-- void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);
--
-- struct task_struct * (*pick_next_task) (struct rq *rq);
-- void (*put_prev_task) (struct rq *rq, struct task_struct *p);
--
--#ifdef CONFIG_SMP
-- unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
-- struct rq *busiest, unsigned long max_load_move,
-- struct sched_domain *sd, enum cpu_idle_type idle,
-- int *all_pinned, int *this_best_prio);
--
-- int (*move_one_task) (struct rq *this_rq, int this_cpu,
-- struct rq *busiest, struct sched_domain *sd,
-- enum cpu_idle_type idle);
-- void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
-- void (*post_schedule) (struct rq *this_rq);
-- void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
--#endif
--
-- void (*set_curr_task) (struct rq *rq);
-- void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
-- void (*task_new) (struct rq *rq, struct task_struct *p);
-- void (*set_cpus_allowed)(struct task_struct *p,
-- const cpumask_t *newmask);
--
-- void (*rq_online)(struct rq *rq);
-- void (*rq_offline)(struct rq *rq);
--
-- void (*switched_from) (struct rq *this_rq, struct task_struct *task,
-- int running);
-- void (*switched_to) (struct rq *this_rq, struct task_struct *task,
-- int running);
-- void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
-- int oldprio, int running);
--
--#ifdef CONFIG_FAIR_GROUP_SCHED
-- void (*moved_group) (struct task_struct *p);
--#endif
--};
--
--struct load_weight {
-- unsigned long weight, inv_weight;
--};
--
--/*
-- * CFS stats for a schedulable entity (task, task-group etc)
-- *
-- * Current field usage histogram:
-- *
-- * 4 se->block_start
-- * 4 se->run_node
-- * 4 se->sleep_start
-- * 6 se->load.weight
-- */
--struct sched_entity {
-- struct load_weight load; /* for load-balancing */
-- struct rb_node run_node;
-- struct list_head group_node;
-- unsigned int on_rq;
--
-- u64 exec_start;
-- u64 sum_exec_runtime;
-- u64 vruntime;
-- u64 prev_sum_exec_runtime;
--
-- u64 last_wakeup;
-- u64 avg_overlap;
--
--#ifdef CONFIG_SCHEDSTATS
-- u64 wait_start;
-- u64 wait_max;
-- u64 wait_count;
-- u64 wait_sum;
--
-- u64 sleep_start;
-- u64 sleep_max;
-- s64 sum_sleep_runtime;
--
-- u64 block_start;
-- u64 block_max;
-- u64 exec_max;
-- u64 slice_max;
--
-- u64 nr_migrations;
-- u64 nr_migrations_cold;
-- u64 nr_failed_migrations_affine;
-- u64 nr_failed_migrations_running;
-- u64 nr_failed_migrations_hot;
-- u64 nr_forced_migrations;
-- u64 nr_forced2_migrations;
--
-- u64 nr_wakeups;
-- u64 nr_wakeups_sync;
-- u64 nr_wakeups_migrate;
-- u64 nr_wakeups_local;
-- u64 nr_wakeups_remote;
-- u64 nr_wakeups_affine;
-- u64 nr_wakeups_affine_attempts;
-- u64 nr_wakeups_passive;
-- u64 nr_wakeups_idle;
--#endif
--
--#ifdef CONFIG_FAIR_GROUP_SCHED
-- struct sched_entity *parent;
-- /* rq on which this entity is (to be) queued: */
-- struct cfs_rq *cfs_rq;
-- /* rq "owned" by this entity/group: */
-- struct cfs_rq *my_q;
--#endif
--};
--
--struct sched_rt_entity {
-- struct list_head run_list;
-- unsigned int time_slice;
-- unsigned long timeout;
-- int nr_cpus_allowed;
--
-- struct sched_rt_entity *back;
--#ifdef CONFIG_RT_GROUP_SCHED
-- struct sched_rt_entity *parent;
-- /* rq on which this entity is (to be) queued: */
-- struct rt_rq *rt_rq;
-- /* rq "owned" by this entity/group: */
-- struct rt_rq *my_q;
--#endif
--};
--
--struct task_struct {
-- volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
-- void *stack;
-- atomic_t usage;
-- unsigned int flags; /* per process flags, defined below */
-- unsigned int ptrace;
--
-- int lock_depth; /* BKL lock depth */
--
--#ifdef CONFIG_SMP
--#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-- int oncpu;
--#endif
--#endif
--
-- int prio, static_prio, normal_prio;
-- unsigned int rt_priority;
-- const struct sched_class *sched_class;
-- struct sched_entity se;
-- struct sched_rt_entity rt;
--
--#ifdef CONFIG_PREEMPT_NOTIFIERS
-- /* list of struct preempt_notifier: */
-- struct hlist_head preempt_notifiers;
--#endif
--
-- /*
-- * fpu_counter contains the number of consecutive context switches
-- * during which the FPU is used. If this is over a threshold, the lazy
-- * FPU saving becomes eager, to save the trap. This is an unsigned char
-- * so that after 256 times the counter wraps and the behavior turns
-- * lazy again; this deals with bursty apps that only use the FPU for
-- * a short time.
-- */
-- unsigned char fpu_counter;
-- s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
--#ifdef CONFIG_BLK_DEV_IO_TRACE
-- unsigned int btrace_seq;
--#endif
--
-- unsigned int policy;
-- cpumask_t cpus_allowed;
--
--#ifdef CONFIG_PREEMPT_RCU
-- int rcu_read_lock_nesting;
-- int rcu_flipctr_idx;
--#endif /* #ifdef CONFIG_PREEMPT_RCU */
--
--#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
-- struct sched_info sched_info;
--#endif
--
-- struct list_head tasks;
--
-- struct mm_struct *mm, *active_mm;
--
--/* task state */
-- struct linux_binfmt *binfmt;
-- int exit_state;
-- int exit_code, exit_signal;
-- int pdeath_signal; /* The signal sent when the parent dies */
-- /* ??? */
-- unsigned int personality;
-- unsigned did_exec:1;
-- pid_t pid;
-- pid_t tgid;
--
--#ifdef CONFIG_CC_STACKPROTECTOR
-- /* Canary value for the -fstack-protector gcc feature */
-- unsigned long stack_canary;
--#endif
-- /*
-- * pointers to (original) parent process, youngest child, younger sibling,
-- * older sibling, respectively. (p->father can be replaced with
-- * p->real_parent->pid)
-- */
-- struct task_struct *real_parent; /* real parent process */
-- struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
-- /*
-- * children/sibling forms the list of my natural children
-- */
-- struct list_head children; /* list of my children */
-- struct list_head sibling; /* linkage in my parent's children list */
-- struct task_struct *group_leader; /* threadgroup leader */
--
-- /*
-- * ptraced is the list of tasks this task is using ptrace on.
-- * This includes both natural children and PTRACE_ATTACH targets.
-- * p->ptrace_entry is p's link on the p->parent->ptraced list.
-- */
-- struct list_head ptraced;
-- struct list_head ptrace_entry;
--
-- /* PID/PID hash table linkage. */
-- struct pid_link pids[PIDTYPE_MAX];
-- struct list_head thread_group;
--
-- struct completion *vfork_done; /* for vfork() */
-- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
-- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
--
-- cputime_t utime, stime, utimescaled, stimescaled;
-- cputime_t gtime;
-- cputime_t prev_utime, prev_stime;
-- unsigned long nvcsw, nivcsw; /* context switch counts */
-- struct timespec start_time; /* monotonic time */
-- struct timespec real_start_time; /* boot based time */
--/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
-- unsigned long min_flt, maj_flt;
--
-- cputime_t it_prof_expires, it_virt_expires;
-- unsigned long long it_sched_expires;
-- struct list_head cpu_timers[3];
--
--/* process credentials */
-- uid_t uid,euid,suid,fsuid;
-- gid_t gid,egid,sgid,fsgid;
-- struct group_info *group_info;
-- kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
-- struct user_struct *user;
-- unsigned securebits;
--#ifdef CONFIG_KEYS
-- unsigned char jit_keyring; /* default keyring to attach requested keys to */
-- struct key *request_key_auth; /* assumed request_key authority */
-- struct key *thread_keyring; /* keyring private to this thread */
--#endif
-- char comm[TASK_COMM_LEN]; /* executable name excluding path
-- - access with [gs]et_task_comm (which lock
-- it with task_lock())
-- - initialized normally by flush_old_exec */
--/* file system info */
-- int link_count, total_link_count;
--#ifdef CONFIG_SYSVIPC
--/* ipc stuff */
-- struct sysv_sem sysvsem;
--#endif
--#ifdef CONFIG_DETECT_SOFTLOCKUP
--/* hung task detection */
-- unsigned long last_switch_timestamp;
-- unsigned long last_switch_count;
--#endif
--/* CPU-specific state of this task */
-- struct thread_struct thread;
--/* filesystem information */
-- struct fs_struct *fs;
--/* open file information */
-- struct files_struct *files;
--/* namespaces */
-- struct nsproxy *nsproxy;
--/* signal handlers */
-- struct signal_struct *signal;
-- struct sighand_struct *sighand;
--
-- sigset_t blocked, real_blocked;
-- sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
-- struct sigpending pending;
--
-- unsigned long sas_ss_sp;
-- size_t sas_ss_size;
-- int (*notifier)(void *priv);
-- void *notifier_data;
-- sigset_t *notifier_mask;
--#ifdef CONFIG_SECURITY
-- void *security;
--#endif
-- struct audit_context *audit_context;
--#ifdef CONFIG_AUDITSYSCALL
-- uid_t loginuid;
-- unsigned int sessionid;
--#endif
-- seccomp_t seccomp;
--
--/* vserver context data */
-- struct vx_info *vx_info;
-- struct nx_info *nx_info;
--
-- xid_t xid;
-- nid_t nid;
-- tag_t tag;
--
--/* Thread group tracking */
-- u32 parent_exec_id;
-- u32 self_exec_id;
--/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
-- spinlock_t alloc_lock;
--
-- /* Protection of the PI data structures: */
-- spinlock_t pi_lock;
--
--#ifdef CONFIG_RT_MUTEXES
-- /* PI waiters blocked on a rt_mutex held by this task */
-- struct plist_head pi_waiters;
-- /* Deadlock detection and priority inheritance handling */
-- struct rt_mutex_waiter *pi_blocked_on;
--#endif
--
--#ifdef CONFIG_DEBUG_MUTEXES
-- /* mutex deadlock detection */
-- struct mutex_waiter *blocked_on;
--#endif
--#ifdef CONFIG_TRACE_IRQFLAGS
-- unsigned int irq_events;
-- int hardirqs_enabled;
-- unsigned long hardirq_enable_ip;
-- unsigned int hardirq_enable_event;
-- unsigned long hardirq_disable_ip;
-- unsigned int hardirq_disable_event;
-- int softirqs_enabled;
-- unsigned long softirq_disable_ip;
-- unsigned int softirq_disable_event;
-- unsigned long softirq_enable_ip;
-- unsigned int softirq_enable_event;
-- int hardirq_context;
-- int softirq_context;
--#endif
--#ifdef CONFIG_LOCKDEP
--# define MAX_LOCK_DEPTH 48UL
-- u64 curr_chain_key;
-- int lockdep_depth;
-- unsigned int lockdep_recursion;
-- struct held_lock held_locks[MAX_LOCK_DEPTH];
--#endif
--
--/* journalling filesystem info */
-- void *journal_info;
--
--/* stacked block device info */
-- struct bio *bio_list, **bio_tail;
--
--/* VM state */
-- struct reclaim_state *reclaim_state;
--
-- struct backing_dev_info *backing_dev_info;
--
-- struct io_context *io_context;
--
-- unsigned long ptrace_message;
-- siginfo_t *last_siginfo; /* For ptrace use. */
-- struct task_io_accounting ioac;
--#if defined(CONFIG_TASK_XACCT)
-- u64 acct_rss_mem1; /* accumulated rss usage */
-- u64 acct_vm_mem1; /* accumulated virtual memory usage */
-- cputime_t acct_timexpd; /* stime + utime since last update */
--#endif
--#ifdef CONFIG_CPUSETS
-- nodemask_t mems_allowed;
-- int cpuset_mems_generation;
-- int cpuset_mem_spread_rotor;
--#endif
--#ifdef CONFIG_CGROUPS
-- /* Control Group info protected by css_set_lock */
-- struct css_set *cgroups;
-- /* cg_list protected by css_set_lock and tsk->alloc_lock */
-- struct list_head cg_list;
--#endif
--#ifdef CONFIG_FUTEX
-- struct robust_list_head __user *robust_list;
--#ifdef CONFIG_COMPAT
-- struct compat_robust_list_head __user *compat_robust_list;
--#endif
-- struct list_head pi_state_list;
-- struct futex_pi_state *pi_state_cache;
--#endif
--#ifdef CONFIG_NUMA
-- struct mempolicy *mempolicy;
-- short il_next;
--#endif
-- atomic_t fs_excl; /* holding fs exclusive resources */
-- struct rcu_head rcu;
--
-- struct list_head *scm_work_list;
--
--/*
-- * cache last used pipe for splice
-- */
-- struct pipe_inode_info *splice_pipe;
--#ifdef CONFIG_TASK_DELAY_ACCT
-- struct task_delay_info *delays;
--#endif
--#ifdef CONFIG_FAULT_INJECTION
-- int make_it_fail;
--#endif
-- struct prop_local_single dirties;
--#ifdef CONFIG_LATENCYTOP
-- int latency_record_count;
-- struct latency_record latency_record[LT_SAVECOUNT];
--#endif
--};
--
--/*
-- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
-- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
-- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
-- * values are inverted: lower p->prio value means higher priority.
-- *
-- * The MAX_USER_RT_PRIO value allows the actual maximum
-- * RT priority to be separate from the value exported to
-- * user-space. This allows kernel threads to set their
-- * priority to a value higher than any user task. Note:
-- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
-- */
--
--#define MAX_USER_RT_PRIO 100
--#define MAX_RT_PRIO MAX_USER_RT_PRIO
--
--#define MAX_PRIO (MAX_RT_PRIO + 40)
--#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
--
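[Editor's note, not part of the patch: plugging in the numbers, MAX_RT_PRIO is 100 and MAX_PRIO is 140, so RT tasks occupy prio 0..99 and SCHED_NORMAL/SCHED_BATCH tasks prio 100..139, with nice shifting prio around DEFAULT_PRIO (120). A standalone check of that arithmetic (the NICE_TO_PRIO mapping itself lives outside this header):

#include <assert.h>

#define MAX_USER_RT_PRIO 100
#define MAX_RT_PRIO      MAX_USER_RT_PRIO
#define MAX_PRIO         (MAX_RT_PRIO + 40)
#define DEFAULT_PRIO     (MAX_RT_PRIO + 20)

int main(void)
{
        /* nice -20..19 spans the non-RT prio range 100..139 */
        assert(DEFAULT_PRIO + (-20) == MAX_RT_PRIO);   /* 100 */
        assert(DEFAULT_PRIO + 0    == 120);            /* the default */
        assert(DEFAULT_PRIO + 19   == MAX_PRIO - 1);   /* 139 */
        return 0;
}]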
--static inline int rt_prio(int prio)
--{
-- if (unlikely(prio < MAX_RT_PRIO))
-- return 1;
-- return 0;
--}
--
--static inline int rt_task(struct task_struct *p)
--{
-- return rt_prio(p->prio);
--}
--
--static inline void set_task_session(struct task_struct *tsk, pid_t session)
--{
-- tsk->signal->__session = session;
--}
--
--static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
--{
-- tsk->signal->__pgrp = pgrp;
--}
--
--static inline struct pid *task_pid(struct task_struct *task)
--{
-- return task->pids[PIDTYPE_PID].pid;
--}
--
--static inline struct pid *task_tgid(struct task_struct *task)
--{
-- return task->group_leader->pids[PIDTYPE_PID].pid;
--}
--
--static inline struct pid *task_pgrp(struct task_struct *task)
--{
-- return task->group_leader->pids[PIDTYPE_PGID].pid;
--}
--
--static inline struct pid *task_session(struct task_struct *task)
--{
-- return task->group_leader->pids[PIDTYPE_SID].pid;
--}
--
--struct pid_namespace;
--
--/*
-- * the helpers to get the task's different pids as they are seen
-- * from various namespaces
-- *
-- * task_xid_nr() : global id, i.e. the id seen from the init namespace;
-- * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
-- * current.
-- * task_xid_nr_ns() : id seen from the ns specified;
-- *
-- * set_task_vxid() : assigns a virtual id to a task;
-- *
-- * see also pid_nr() etc in include/linux/pid.h
-- */
--
--#include <linux/vserver/base.h>
--#include <linux/vserver/context.h>
--#include <linux/vserver/debug.h>
--#include <linux/vserver/pid.h>
--
--static inline pid_t task_pid_nr(struct task_struct *tsk)
--{
-- return tsk->pid;
--}
--
--pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
--
--static inline pid_t task_pid_vnr(struct task_struct *tsk)
--{
-- return vx_map_pid(pid_vnr(task_pid(tsk)));
--}
--
--
--static inline pid_t task_tgid_nr(struct task_struct *tsk)
--{
-- return tsk->tgid;
--}
--
--pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
--
--static inline pid_t task_tgid_vnr(struct task_struct *tsk)
--{
-- return vx_map_tgid(pid_vnr(task_tgid(tsk)));
--}
--
--
--static inline pid_t task_pgrp_nr(struct task_struct *tsk)
--{
-- return tsk->signal->__pgrp;
--}
--
--pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
--
--static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
--{
-- return pid_vnr(task_pgrp(tsk));
--}
--
--
--static inline pid_t task_session_nr(struct task_struct *tsk)
--{
-- return tsk->signal->__session;
--}
--
--pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
--
--static inline pid_t task_session_vnr(struct task_struct *tsk)
--{
-- return pid_vnr(task_session(tsk));
--}
--
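[Editor's sketch, not part of the patch: the same task reported to two audiences, kernel context assumed. The vx_map_*() indirection in the vnr helpers is VServer's pid remapping:

static void report_ids(struct task_struct *tsk)
{
        pid_t global = task_pid_nr(tsk);  /* id in the init namespace */
        pid_t local  = task_pid_vnr(tsk); /* id as current's pid namespace
                                             (and the VServer map) sees it */

        printk(KERN_DEBUG "pid %d (global %d)\n", local, global);
}]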
--
--/**
-- * pid_alive - check that a task structure is not stale
-- * @p: Task structure to be checked.
-- *
-- * Test if a process is not yet dead (at most zombie state).
-- * If pid_alive fails, then pointers within the task structure
-- * can be stale and must not be dereferenced.
-- */
--static inline int pid_alive(struct task_struct *p)
--{
-- return p->pids[PIDTYPE_PID].pid != NULL;
--}
--
--/**
-- * is_global_init - check if a task structure is init
-- * @tsk: Task structure to be checked.
-- *
-- * Check if a task structure is the first user space task the kernel created.
-- */
--static inline int is_global_init(struct task_struct *tsk)
--{
-- return tsk->pid == 1;
--}
--
--/*
-- * is_container_init:
-- * check whether the task is init in its own pid namespace.
-- */
--extern int is_container_init(struct task_struct *tsk);
--
--extern struct pid *cad_pid;
--
--extern void free_task(struct task_struct *tsk);
--#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
--
--extern void __put_task_struct(struct task_struct *t);
--
--static inline void put_task_struct(struct task_struct *t)
--{
-- if (atomic_dec_and_test(&t->usage))
-- __put_task_struct(t);
--}
--
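[Editor's sketch, not part of the patch: the get/put pairing, kernel context assumed. The usage count pins the struct task_struct in memory; it says nothing about the task still being alive, which is what pid_alive() above is for:

static void inspect_later(struct task_struct *t)
{
        get_task_struct(t);     /* atomic_inc(&t->usage) */
        /* ... may block; t may exit meanwhile, but is not freed ... */
        put_task_struct(t);     /* __put_task_struct() runs when it hits 0 */
}]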
--extern cputime_t task_utime(struct task_struct *p);
--extern cputime_t task_stime(struct task_struct *p);
--extern cputime_t task_gtime(struct task_struct *p);
--
--/*
-- * Per process flags
-- */
--#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
-- /* Not implemented yet, only for 486*/
--#define PF_STARTING 0x00000002 /* being created */
--#define PF_EXITING 0x00000004 /* getting shut down */
--#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
--#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
--#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
--#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
--#define PF_DUMPCORE 0x00000200 /* dumped core */
--#define PF_SIGNALED 0x00000400 /* killed by a signal */
--#define PF_MEMALLOC 0x00000800 /* Allocating memory */
--#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
--#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
--#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
--#define PF_FROZEN 0x00010000 /* frozen for system suspend */
--#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
--#define PF_KSWAPD 0x00040000 /* I am kswapd */
--#define PF_SWAPOFF 0x00080000 /* I am in swapoff */
--#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
--#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
--#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
--#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
--#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
--#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
--#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */
--#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
--#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
--#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */
--#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */
--
--/*
-- * Only the _current_ task can read/write to tsk->flags, but other
-- * tasks can access tsk->flags in readonly mode for example
-- * with tsk_used_math (like during threaded core dumping).
-- * There is however an exception to this rule during ptrace
-- * or during fork: the ptracer task is allowed to write to the
-- * child->flags of its traced child (same goes for fork, the parent
-- * can write to the child->flags), because we're guaranteed the
-- * child is not running and in turn not changing child->flags
-- * at the same time the parent does it.
-- */
--#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
--#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
--#define clear_used_math() clear_stopped_child_used_math(current)
--#define set_used_math() set_stopped_child_used_math(current)
--#define conditional_stopped_child_used_math(condition, child) \
-- do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
--#define conditional_used_math(condition) \
-- conditional_stopped_child_used_math(condition, current)
--#define copy_to_stopped_child_used_math(child) \
-- do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
--/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
--#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
--#define used_math() tsk_used_math(current)
--
--#ifdef CONFIG_SMP
--extern int set_cpus_allowed_ptr(struct task_struct *p,
-- const cpumask_t *new_mask);
--#else
--static inline int set_cpus_allowed_ptr(struct task_struct *p,
-- const cpumask_t *new_mask)
--{
-- if (!cpu_isset(0, *new_mask))
-- return -EINVAL;
-- return 0;
--}
--#endif
--static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
--{
-- return set_cpus_allowed_ptr(p, &new_mask);
--}
--
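[Editor's sketch, not part of the patch: pinning a task with the pointer-based API, kernel context assumed; cpumask_of_cpu() builds a single-CPU cpumask_t in this kernel generation:

static int pin_to_cpu(struct task_struct *p, int cpu)
{
        cpumask_t mask = cpumask_of_cpu(cpu);

        return set_cpus_allowed_ptr(p, &mask);
}

The by-value set_cpus_allowed() above is just a wrapper around this; the pointer form avoids copying a potentially large cpumask_t on the stack.]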
--extern unsigned long long sched_clock(void);
--
--extern void sched_clock_init(void);
--extern u64 sched_clock_cpu(int cpu);
--
--#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
--static inline void sched_clock_tick(void)
--{
--}
--
--static inline void sched_clock_idle_sleep_event(void)
--{
--}
--
--static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
--{
--}
--#else
--extern void sched_clock_tick(void);
--extern void sched_clock_idle_sleep_event(void);
--extern void sched_clock_idle_wakeup_event(u64 delta_ns);
--#endif
--
--/*
-- * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
-- * clock constructed from sched_clock():
-- */
--extern unsigned long long cpu_clock(int cpu);
--
--extern unsigned long long
--task_sched_runtime(struct task_struct *task);
--
--/* sched_exec is called by processes performing an exec */
--#ifdef CONFIG_SMP
--extern void sched_exec(void);
--#else
--#define sched_exec() {}
--#endif
--
--extern void sched_clock_idle_sleep_event(void);
--extern void sched_clock_idle_wakeup_event(u64 delta_ns);
--
--#ifdef CONFIG_HOTPLUG_CPU
--extern void idle_task_exit(void);
--#else
--static inline void idle_task_exit(void) {}
--#endif
--
--extern void sched_idle_next(void);
--
--#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
--extern void wake_up_idle_cpu(int cpu);
--#else
--static inline void wake_up_idle_cpu(int cpu) { }
--#endif
--
--#ifdef CONFIG_SCHED_DEBUG
--extern unsigned int sysctl_sched_latency;
--extern unsigned int sysctl_sched_min_granularity;
--extern unsigned int sysctl_sched_wakeup_granularity;
--extern unsigned int sysctl_sched_child_runs_first;
--extern unsigned int sysctl_sched_features;
--extern unsigned int sysctl_sched_migration_cost;
--extern unsigned int sysctl_sched_nr_migrate;
--extern unsigned int sysctl_sched_shares_ratelimit;
--
--int sched_nr_latency_handler(struct ctl_table *table, int write,
-- struct file *file, void __user *buffer, size_t *length,
-- loff_t *ppos);
--#endif
--extern unsigned int sysctl_sched_rt_period;
--extern int sysctl_sched_rt_runtime;
--
--int sched_rt_handler(struct ctl_table *table, int write,
-- struct file *filp, void __user *buffer, size_t *lenp,
-- loff_t *ppos);
--
--extern unsigned int sysctl_sched_compat_yield;
--
--#ifdef CONFIG_RT_MUTEXES
--extern int rt_mutex_getprio(struct task_struct *p);
--extern void rt_mutex_setprio(struct task_struct *p, int prio);
--extern void rt_mutex_adjust_pi(struct task_struct *p);
--#else
--static inline int rt_mutex_getprio(struct task_struct *p)
--{
-- return p->normal_prio;
--}
--# define rt_mutex_adjust_pi(p) do { } while (0)
--#endif
--
--extern void set_user_nice(struct task_struct *p, long nice);
--extern int task_prio(const struct task_struct *p);
--extern int task_nice(const struct task_struct *p);
--extern int can_nice(const struct task_struct *p, const int nice);
--extern int task_curr(const struct task_struct *p);
--extern int idle_cpu(int cpu);
--extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
--extern int sched_setscheduler_nocheck(struct task_struct *, int,
-- struct sched_param *);
--extern struct task_struct *idle_task(int cpu);
--extern struct task_struct *curr_task(int cpu);
--extern void set_curr_task(int cpu, struct task_struct *p);
--
--void yield(void);
--
--/*
-- * The default (Linux) execution domain.
-- */
--extern struct exec_domain default_exec_domain;
--
--union thread_union {
-- struct thread_info thread_info;
-- unsigned long stack[THREAD_SIZE/sizeof(long)];
--};
--
--#ifndef __HAVE_ARCH_KSTACK_END
--static inline int kstack_end(void *addr)
--{
-- /* Reliable end of stack detection:
-- * Some APM BIOS versions misalign the stack.
-- */
-- return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
--}
--#endif
--
--extern union thread_union init_thread_union;
--extern struct task_struct init_task;
--
--extern struct mm_struct init_mm;
--
--extern struct pid_namespace init_pid_ns;
--
--/*
-- * find a task by one of its numerical ids
-- *
-- * find_task_by_pid_type_ns():
-- * it is the most generic call - it finds a task by id,
-- * type and namespace specified
-- * find_task_by_pid_ns():
-- * finds a task by its pid in the specified namespace
-- * find_task_by_vpid():
-- * finds a task by its virtual pid
-- *
-- * see also find_vpid() etc in include/linux/pid.h
-- */
--
--extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
-- struct pid_namespace *ns);
--
--extern struct task_struct *find_task_by_vpid(pid_t nr);
--extern struct task_struct *find_task_by_pid_ns(pid_t nr,
-- struct pid_namespace *ns);
--
--extern void __set_special_pids(struct pid *pid);
--
--/* per-UID process charging. */
--extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
--static inline struct user_struct *get_uid(struct user_struct *u)
--{
-- atomic_inc(&u->__count);
-- return u;
--}
--extern void free_uid(struct user_struct *);
--extern void switch_uid(struct user_struct *);
--extern void release_uids(struct user_namespace *ns);
--
--#include <asm/current.h>
--
--extern void do_timer(unsigned long ticks);
--
--extern int wake_up_state(struct task_struct *tsk, unsigned int state);
--extern int wake_up_process(struct task_struct *tsk);
--extern void wake_up_new_task(struct task_struct *tsk,
-- unsigned long clone_flags);
--#ifdef CONFIG_SMP
-- extern void kick_process(struct task_struct *tsk);
--#else
-- static inline void kick_process(struct task_struct *tsk) { }
--#endif
--extern void sched_fork(struct task_struct *p, int clone_flags);
--extern void sched_dead(struct task_struct *p);
--
--extern int in_group_p(gid_t);
--extern int in_egroup_p(gid_t);
--
--extern void proc_caches_init(void);
--extern void flush_signals(struct task_struct *);
--extern void ignore_signals(struct task_struct *);
--extern void flush_signal_handlers(struct task_struct *, int force_default);
--extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
--
--static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
--{
-- unsigned long flags;
-- int ret;
--
-- spin_lock_irqsave(&tsk->sighand->siglock, flags);
-- ret = dequeue_signal(tsk, mask, info);
-- spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
--
-- return ret;
--}
--
--extern void block_all_signals(int (*notifier)(void *priv), void *priv,
-- sigset_t *mask);
--extern void unblock_all_signals(void);
--extern void release_task(struct task_struct * p);
--extern int send_sig_info(int, struct siginfo *, struct task_struct *);
--extern int force_sigsegv(int, struct task_struct *);
--extern int force_sig_info(int, struct siginfo *, struct task_struct *);
--extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
--extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
--extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
--extern int kill_pgrp(struct pid *pid, int sig, int priv);
--extern int kill_pid(struct pid *pid, int sig, int priv);
--extern int kill_proc_info(int, struct siginfo *, pid_t);
--extern int do_notify_parent(struct task_struct *, int);
--extern void force_sig(int, struct task_struct *);
--extern void force_sig_specific(int, struct task_struct *);
--extern int send_sig(int, struct task_struct *, int);
--extern void zap_other_threads(struct task_struct *p);
--extern struct sigqueue *sigqueue_alloc(void);
--extern void sigqueue_free(struct sigqueue *);
--extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
--extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
--extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
--
--static inline int kill_cad_pid(int sig, int priv)
--{
-- return kill_pid(cad_pid, sig, priv);
--}
--
--/* These can be the second arg to send_sig_info/send_group_sig_info. */
--#define SEND_SIG_NOINFO ((struct siginfo *) 0)
--#define SEND_SIG_PRIV ((struct siginfo *) 1)
--#define SEND_SIG_FORCED ((struct siginfo *) 2)
--
--static inline int is_si_special(const struct siginfo *info)
--{
-- return info <= SEND_SIG_FORCED;
--}
--
--/* True if we are on the alternate signal stack. */
--
--static inline int on_sig_stack(unsigned long sp)
--{
-- return (sp - current->sas_ss_sp < current->sas_ss_size);
--}
--
--static inline int sas_ss_flags(unsigned long sp)
--{
-- return (current->sas_ss_size == 0 ? SS_DISABLE
-- : on_sig_stack(sp) ? SS_ONSTACK : 0);
--}
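
The on_sig_stack() check above packs a two-sided range test into a single comparison: because unsigned subtraction wraps, sp - sas_ss_sp is less than sas_ss_size exactly when sp lies inside [sas_ss_sp, sas_ss_sp + sas_ss_size). A minimal standalone C sketch of the trick (names are illustrative):

/* Sketch, not from the patch: the single-comparison range check used
 * by on_sig_stack(). Unsigned subtraction wraps modulo 2^N, so
 * (p - base) < size holds exactly when base <= p < base + size. */
#include <assert.h>
#include <stdint.h>

static int in_range(uintptr_t p, uintptr_t base, uintptr_t size)
{
    return p - base < size;   /* wraps to a huge value when p < base */
}

int main(void)
{
    assert( in_range(0x1008, 0x1000, 0x100));
    assert(!in_range(0x0fff, 0x1000, 0x100));  /* below base: wraps */
    assert(!in_range(0x1100, 0x1000, 0x100));  /* one past the end */
    return 0;
}
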
--
--/*
-- * Routines for handling mm_structs
-- */
--extern struct mm_struct * mm_alloc(void);
--
--/* mmdrop drops the mm and the page tables */
--extern void __mmdrop(struct mm_struct *);
--static inline void mmdrop(struct mm_struct * mm)
--{
-- if (unlikely(atomic_dec_and_test(&mm->mm_count)))
-- __mmdrop(mm);
--}
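
mmdrop() above and get_uid()/free_uid() earlier are instances of the same reference-counting idiom: increment on get, and free only when an atomic decrement observes the count reaching zero. A hedged userspace sketch using C11 atomics in place of the kernel's atomic_t (obj_get/obj_put are invented names, not kernel API):

/* Userspace sketch of the get/put pattern behind get_uid() and
 * mmdrop(); C11 atomics stand in for the kernel's atomic_t. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
    atomic_int refcount;
};

static struct obj *obj_get(struct obj *o)
{
    atomic_fetch_add(&o->refcount, 1);
    return o;
}

static void obj_put(struct obj *o)
{
    /* fetch_sub returns the old value; 1 means we dropped the last ref */
    if (atomic_fetch_sub(&o->refcount, 1) == 1) {
        printf("last reference dropped, freeing\n");
        free(o);
    }
}

int main(void)
{
    struct obj *o = malloc(sizeof(*o));
    atomic_init(&o->refcount, 1);
    obj_get(o);   /* take a second reference */
    obj_put(o);   /* count back to 1, object survives */
    obj_put(o);   /* count hits 0, object is freed */
    return 0;
}
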
--
--/* mmput gets rid of the mappings and all user-space */
--extern void mmput(struct mm_struct *);
--/* Grab a reference to a task's mm, if it is not already going away */
--extern struct mm_struct *get_task_mm(struct task_struct *task);
--/* Remove the current task's stale references to the old mm_struct */
--extern void mm_release(struct task_struct *, struct mm_struct *);
--/* Allocate a new mm structure and copy contents from tsk->mm */
--extern struct mm_struct *dup_mm(struct task_struct *tsk);
--
--extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
--extern void flush_thread(void);
--extern void exit_thread(void);
--
--extern void exit_files(struct task_struct *);
--extern void __cleanup_signal(struct signal_struct *);
--extern void __cleanup_sighand(struct sighand_struct *);
--
--extern void exit_itimers(struct signal_struct *);
--extern void flush_itimer_signals(void);
--
--extern NORET_TYPE void do_group_exit(int);
--
--extern void daemonize(const char *, ...);
--extern int allow_signal(int);
--extern int disallow_signal(int);
--
--extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
--extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
--struct task_struct *fork_idle(int);
--
--extern void set_task_comm(struct task_struct *tsk, char *from);
--extern char *get_task_comm(char *to, struct task_struct *tsk);
--
--#ifdef CONFIG_SMP
--extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
--#else
--static inline unsigned long wait_task_inactive(struct task_struct *p,
-- long match_state)
--{
-- return 1;
--}
--#endif
--
--#define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
--
--#define for_each_process(p) \
-- for (p = &init_task ; (p = next_task(p)) != &init_task ; )
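
for_each_process() above walks a circular, sentinel-headed list: init_task is always present, so the walk starts after it and stops when it comes back around. A toy userspace model of the same shape (struct task and for_each_task here are illustrative stand-ins, not the kernel's types):

/* Illustrative model of the circular task list walked by
 * for_each_process(): a sentinel head node terminates the loop. */
#include <stdio.h>

struct task {
    const char *comm;
    struct task *next;   /* stand-in for the tasks list linkage */
};

#define for_each_task(p, head) \
    for ((p) = (head)->next; (p) != (head); (p) = (p)->next)

int main(void)
{
    struct task init_task = { "swapper", NULL };
    struct task a = { "init", NULL }, b = { "sshd", NULL };
    struct task *p;

    /* build the circle: init_task -> a -> b -> init_task */
    init_task.next = &a; a.next = &b; b.next = &init_task;

    for_each_task(p, &init_task)
        printf("%s\n", p->comm);
    return 0;
}
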
--
--/*
-- * Careful: do_each_thread/while_each_thread is a double loop so
-- * 'break' will not work as expected - use goto instead.
-- */
--#define do_each_thread(g, t) \
-- for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
--
--#define while_each_thread(g, t) \
-- while ((t = next_thread(t)) != g)
--
--/* de_thread depends on thread_group_leader not being a pid based check */
--#define thread_group_leader(p) (p == p->group_leader)
--
--/* Due to the insanities of de_thread it is possible for a process
-- * to have the pid of the thread group leader without actually being
-- * the thread group leader. For iteration through the pids in proc
-- * all we care about is that we have a task with the appropriate
-- * pid, we don't actually care if we have the right task.
-- */
--static inline int has_group_leader_pid(struct task_struct *p)
--{
-- return p->pid == p->tgid;
--}
--
--static inline
--int same_thread_group(struct task_struct *p1, struct task_struct *p2)
--{
-- return p1->tgid == p2->tgid;
--}
--
--static inline struct task_struct *next_thread(const struct task_struct *p)
--{
-- return list_entry(rcu_dereference(p->thread_group.next),
-- struct task_struct, thread_group);
--}
--
--static inline int thread_group_empty(struct task_struct *p)
--{
-- return list_empty(&p->thread_group);
--}
--
--#define delay_group_leader(p) \
-- (thread_group_leader(p) && !thread_group_empty(p))
--
--/*
-- * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
-- * subscriptions and synchronises with wait4(). Also used in procfs. Also
-- * pins the final release of task.io_context. Also protects ->cpuset and
-- * ->cgroup.subsys[].
-- *
-- * Nests both inside and outside of read_lock(&tasklist_lock).
-- * It must not be nested with write_lock_irq(&tasklist_lock),
-- * neither inside nor outside.
-- */
--static inline void task_lock(struct task_struct *p)
--{
-- spin_lock(&p->alloc_lock);
--}
--
--static inline void task_unlock(struct task_struct *p)
--{
-- spin_unlock(&p->alloc_lock);
--}
--
--extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
-- unsigned long *flags);
--
--static inline void unlock_task_sighand(struct task_struct *tsk,
-- unsigned long *flags)
--{
-- spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
--}
--
--#ifndef __HAVE_THREAD_FUNCTIONS
--
--#define task_thread_info(task) ((struct thread_info *)(task)->stack)
--#define task_stack_page(task) ((task)->stack)
--
--static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
--{
-- *task_thread_info(p) = *task_thread_info(org);
-- task_thread_info(p)->task = p;
--}
--
--static inline unsigned long *end_of_stack(struct task_struct *p)
--{
-- return (unsigned long *)(task_thread_info(p) + 1);
--}
--
--#endif
--
--static inline int object_is_on_stack(void *obj)
--{
-- void *stack = task_stack_page(current);
--
-- return (obj >= stack) && (obj < (stack + THREAD_SIZE));
--}
--
--extern void thread_info_cache_init(void);
--
--/* set thread flags in other task's structures
-- * - see asm/thread_info.h for TIF_xxxx flags available
-- */
--static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
--{
-- set_ti_thread_flag(task_thread_info(tsk), flag);
--}
--
--static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
--{
-- clear_ti_thread_flag(task_thread_info(tsk), flag);
--}
--
--static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
--{
-- return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
--}
--
--static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
--{
-- return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
--}
--
--static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
--{
-- return test_ti_thread_flag(task_thread_info(tsk), flag);
--}
--
--static inline void set_tsk_need_resched(struct task_struct *tsk)
--{
-- set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
--}
--
--static inline void clear_tsk_need_resched(struct task_struct *tsk)
--{
-- clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
--}
--
--static inline int test_tsk_need_resched(struct task_struct *tsk)
--{
-- return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
--}
--
--static inline int signal_pending(struct task_struct *p)
--{
-- return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
--}
--
--extern int __fatal_signal_pending(struct task_struct *p);
--
--static inline int fatal_signal_pending(struct task_struct *p)
--{
-- return signal_pending(p) && __fatal_signal_pending(p);
--}
--
--static inline int signal_pending_state(long state, struct task_struct *p)
--{
-- if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
-- return 0;
-- if (!signal_pending(p))
-- return 0;
--
-- return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
--}
--
--static inline int need_resched(void)
--{
-- return unlikely(test_thread_flag(TIF_NEED_RESCHED));
--}
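
The unlikely() wrapper used throughout these predicates is a branch-prediction hint, not a semantic change. A self-contained sketch of what it expands to on GCC and Clang:

/* Sketch: likely()/unlikely() are __builtin_expect hints telling the
 * compiler which way a branch usually goes, keeping the hot path
 * straight-line. Purely an optimization hint; results are unchanged. */
#include <stdio.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

static int need_work(int flag)
{
    return unlikely(flag != 0);
}

int main(void)
{
    printf("%d %d\n", need_work(0), need_work(3));   /* 0 1 */
    return 0;
}
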
--
--/*
-- * cond_resched() and cond_resched_lock(): latency reduction via
-- * explicit rescheduling in places that are safe. The return
-- * value indicates whether a reschedule was in fact done.
-- * cond_resched_lock() will drop the spinlock before scheduling,
-- * cond_resched_softirq() will enable bhs before scheduling.
-- */
--extern int _cond_resched(void);
--#ifdef CONFIG_PREEMPT_BKL
--static inline int cond_resched(void)
--{
-- return 0;
--}
--#else
--static inline int cond_resched(void)
--{
-- return _cond_resched();
--}
--#endif
--extern int cond_resched_lock(spinlock_t * lock);
--extern int cond_resched_softirq(void);
--static inline int cond_resched_bkl(void)
--{
-- return _cond_resched();
--}
--
--/*
-- * Does a critical section need to be broken due to another
-- * task waiting? (This technically does not depend on CONFIG_PREEMPT,
-- * but reflects a general need for low latency.)
-- */
--static inline int spin_needbreak(spinlock_t *lock)
--{
--#ifdef CONFIG_PREEMPT
-- return spin_is_contended(lock);
--#else
-- return 0;
--#endif
--}
--
--/*
-- * Reevaluate whether the task has signals pending delivery.
-- * Wake the task if so.
-- * This is required every time the blocked sigset_t changes.
-- * Callers must hold sighand->siglock.
-- */
--extern void recalc_sigpending_and_wake(struct task_struct *t);
--extern void recalc_sigpending(void);
--
--extern void signal_wake_up(struct task_struct *t, int resume_stopped);
--
--/*
-- * Wrappers for p->thread_info->cpu access. No-op on UP.
-- */
--#ifdef CONFIG_SMP
--
--static inline unsigned int task_cpu(const struct task_struct *p)
--{
-- return task_thread_info(p)->cpu;
--}
--
--extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
--
--#else
--
--static inline unsigned int task_cpu(const struct task_struct *p)
--{
-- return 0;
--}
--
--static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
--{
--}
--
--#endif /* CONFIG_SMP */
--
--extern void arch_pick_mmap_layout(struct mm_struct *mm);
--
--#ifdef CONFIG_TRACING
--extern void
--__trace_special(void *__tr, void *__data,
-- unsigned long arg1, unsigned long arg2, unsigned long arg3);
--#else
--static inline void
--__trace_special(void *__tr, void *__data,
-- unsigned long arg1, unsigned long arg2, unsigned long arg3)
--{
--}
--#endif
--
--extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
--extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
--
--extern int sched_mc_power_savings, sched_smt_power_savings;
--
--extern void normalize_rt_tasks(void);
--
--#ifdef CONFIG_GROUP_SCHED
--
--extern struct task_group init_task_group;
--#ifdef CONFIG_USER_SCHED
--extern struct task_group root_task_group;
--#endif
--
--extern struct task_group *sched_create_group(struct task_group *parent);
--extern void sched_destroy_group(struct task_group *tg);
--extern void sched_move_task(struct task_struct *tsk);
--#ifdef CONFIG_FAIR_GROUP_SCHED
--extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
--extern unsigned long sched_group_shares(struct task_group *tg);
--#endif
--#ifdef CONFIG_RT_GROUP_SCHED
--extern int sched_group_set_rt_runtime(struct task_group *tg,
-- long rt_runtime_us);
--extern long sched_group_rt_runtime(struct task_group *tg);
--extern int sched_group_set_rt_period(struct task_group *tg,
-- long rt_period_us);
--extern long sched_group_rt_period(struct task_group *tg);
--#endif
--#endif
--
--#ifdef CONFIG_TASK_XACCT
--static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
--{
-- tsk->ioac.rchar += amt;
--}
--
--static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
--{
-- tsk->ioac.wchar += amt;
--}
--
--static inline void inc_syscr(struct task_struct *tsk)
--{
-- tsk->ioac.syscr++;
--}
--
--static inline void inc_syscw(struct task_struct *tsk)
--{
-- tsk->ioac.syscw++;
--}
--#else
--static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
--{
--}
--
--static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
--{
--}
--
--static inline void inc_syscr(struct task_struct *tsk)
--{
--}
--
--static inline void inc_syscw(struct task_struct *tsk)
--{
--}
--#endif
--
--#ifndef TASK_SIZE_OF
--#define TASK_SIZE_OF(tsk) TASK_SIZE
--#endif
--
--#ifdef CONFIG_MM_OWNER
--extern void mm_update_next_owner(struct mm_struct *mm);
--extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
--#else
--static inline void mm_update_next_owner(struct mm_struct *mm)
--{
--}
--
--static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
--{
--}
--#endif /* CONFIG_MM_OWNER */
--
--#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
--
--#endif /* __KERNEL__ */
--
--#endif
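
One pattern worth noting before the next file: the CONFIG_TASK_XACCT and CONFIG_MM_OWNER blocks above supply empty static inline stubs when the feature is compiled out, so call sites never need #ifdef guards. A minimal standalone sketch of the idiom (CONFIG_STATS and account_read are invented for illustration):

/* Sketch of the compiled-out-stub idiom: the same function name exists
 * either way, so callers stay free of #ifdefs. */
#include <stdio.h>

#ifdef CONFIG_STATS
static inline void account_read(long *total, long amt) { *total += amt; }
#else
static inline void account_read(long *total, long amt) { (void)total; (void)amt; }
#endif

int main(void)
{
    long total = 0;
    account_read(&total, 42);   /* compiles either way, no #ifdef here */
    printf("%ld\n", total);
    return 0;
}
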
-diff -Nurb linux-2.6.27-720/include/linux/seccomp.h linux-2.6.27-710/include/linux/seccomp.h
---- linux-2.6.27-720/include/linux/seccomp.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/linux/seccomp.h 2008-10-09 18:13:53.000000000 -0400
-@@ -21,7 +21,7 @@
-
- #else /* CONFIG_SECCOMP */
-
--typedef EMPTY_STRUCT_DECL(/* unnamed */) seccomp_t;
-+typedef struct { } seccomp_t;
-
- #define secure_computing(x) do { } while (0)
-
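
The EMPTY_STRUCT_DECL wrapper that the -720 tree introduces here appears to exist because an empty struct means different things to the two compilers: it is a GNU C extension with size zero, while C++ requires every complete object type to have nonzero size. A quick sketch to compile with both gcc and g++ and compare:

/* Sketch: empty structs differ between GNU C and C++. */
#include <stdio.h>

struct empty { };   /* GNU C: size 0; C++: size >= 1 */

int main(void)
{
    printf("sizeof(struct empty) = %zu\n", sizeof(struct empty));
    return 0;
}
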
-diff -Nurb linux-2.6.27-720/include/linux/security.h linux-2.6.27-710/include/linux/security.h
---- linux-2.6.27-720/include/linux/security.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/linux/security.h 2008-10-09 18:13:53.000000000 -0400
-@@ -2427,7 +2427,7 @@
- static inline struct dentry *securityfs_create_dir(const char *name,
- struct dentry *parent)
- {
-- return (struct dentry *) ERR_PTR(-ENODEV);
-+ return ERR_PTR(-ENODEV);
- }
-
- static inline struct dentry *securityfs_create_file(const char *name,
-@@ -2436,7 +2436,7 @@
- void *data,
- const struct file_operations *fops)
- {
-- return (struct dentry *) ERR_PTR(-ENODEV);
-+ return ERR_PTR(-ENODEV);
- }
-
- static inline void securityfs_remove(struct dentry *dentry)
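
The ERR_PTR(-ENODEV) stubs above rely on the kernel convention of encoding small negative errnos directly in the pointer value, which works because the top page of the address space is never a valid kernel pointer. A self-contained userspace sketch of the idiom (the constant mirrors the kernel's, but this is illustrative code):

/* Sketch of the ERR_PTR/PTR_ERR/IS_ERR idiom in userspace. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    /* errno-encoding pointers occupy the top MAX_ERRNO addresses */
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
    void *p = ERR_PTR(-ENODEV);
    if (IS_ERR(p))
        printf("error: %ld\n", PTR_ERR(p));   /* -19 on Linux */
    return 0;
}
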
-diff -Nurb linux-2.6.27-720/include/linux/semaphore.h linux-2.6.27-710/include/linux/semaphore.h
---- linux-2.6.27-720/include/linux/semaphore.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/linux/semaphore.h 2008-10-09 18:13:53.000000000 -0400
-@@ -19,21 +19,12 @@
- struct list_head wait_list;
- };
-
--#ifdef __cplusplus
--#define __SEMAPHORE_INITIALIZER(name, n) \
--({ struct semaphore duh; \
-- duh.lock = __SPIN_LOCK_UNLOCKED((name).lock), \
-- duh.count = n, \
-- duh.wait_list = LIST_HEAD_INIT((name).wait_list), \
-- duh;})
--#else
- #define __SEMAPHORE_INITIALIZER(name, n) \
- { \
- .lock = __SPIN_LOCK_UNLOCKED((name).lock), \
- .count = n, \
- .wait_list = LIST_HEAD_INIT((name).wait_list), \
- }
--#endif
-
- #define DECLARE_MUTEX(name) \
- struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
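
The __SEMAPHORE_INITIALIZER hunk above reverts the -720 tree's workaround for C99 designated initializers, which C++ before C++20 rejects. The sketch below shows both spellings; only the positional form compiles in both languages:

/* Sketch: positional vs designated initializers. */
#include <stdio.h>

struct sem { int count; int waiters; };

int main(void)
{
    struct sem a = { 1, 0 };                      /* positional: C and C++ */
    struct sem b = { .count = 1, .waiters = 0 };  /* designated: C99 (C++ only since C++20) */
    printf("%d %d\n", a.count, b.count);
    return 0;
}
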
-diff -Nurb linux-2.6.27-720/include/linux/skbuff.h linux-2.6.27-710/include/linux/skbuff.h
---- linux-2.6.27-720/include/linux/skbuff.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/linux/skbuff.h 2009-05-04 12:15:31.000000000 -0400
-@@ -194,12 +194,6 @@
- typedef unsigned char *sk_buff_data_t;
- #endif
-
--/* Click: overload sk_buff.pkt_type to contain information about whether
-- a packet is clean. Clean packets have the following fields zero:
-- dst, destructor, pkt_bridged, prev, list, sk, security, priority. */
--#define PACKET_CLEAN 128 /* Is packet clean? */
--#define PACKET_TYPE_MASK 127 /* Actual packet type */
--
- /**
- * struct sk_buff - socket buffer
- * @next: Next buffer in list
-@@ -383,7 +377,6 @@
- gfp_t priority);
- extern struct sk_buff *pskb_copy(struct sk_buff *skb,
- gfp_t gfp_mask);
--extern struct sk_buff *skb_recycle(struct sk_buff *skb);
- extern int pskb_expand_head(struct sk_buff *skb,
- int nhead, int ntail,
- gfp_t gfp_mask);
-@@ -1333,7 +1326,7 @@
- }
-
- static inline int skb_add_data(struct sk_buff *skb,
-- unsigned char __user *from, int copy)
-+ char __user *from, int copy)
- {
- const int off = skb->len;
-
-@@ -1409,7 +1402,7 @@
- const void *start, unsigned int len)
- {
- if (skb->ip_summed == CHECKSUM_COMPLETE)
-- skb->csum = csum_sub(skb->csum, csum_partial((const unsigned char *) start, len, 0));
-+ skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
- }
-
- unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
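
The csum_sub(csum_partial(...)) call restored above works because the Internet checksum is a folded 16-bit one's-complement sum, so the contribution of a pulled region can be subtracted out rather than recomputing the whole packet. A minimal userspace sketch of the arithmetic (even-length buffers only, illustrative code):

/* Sketch of incremental one's-complement checksum maintenance. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t csum_partial(const uint8_t *p, size_t len, uint32_t sum)
{
    for (size_t i = 0; i + 1 < len; i += 2)
        sum += (uint32_t)(p[i] << 8 | p[i + 1]);
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);   /* fold carries back in */
    return sum;
}

static uint32_t csum_sub(uint32_t a, uint32_t b)
{
    /* one's-complement subtraction: add the complement, then fold */
    uint32_t sum = a + ((~b) & 0xffff);
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

int main(void)
{
    uint8_t pkt[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    uint32_t whole = csum_partial(pkt, 8, 0);
    uint32_t head  = csum_partial(pkt, 4, 0);
    uint32_t tail  = csum_partial(pkt + 4, 4, 0);
    /* removing the head from the whole leaves the tail's checksum */
    printf("%x == %x\n", csum_sub(whole, head), tail);
    return 0;
}
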
-diff -Nurb linux-2.6.27-720/include/linux/skbuff.h.orig linux-2.6.27-710/include/linux/skbuff.h.orig
---- linux-2.6.27-720/include/linux/skbuff.h.orig 2009-05-04 12:15:31.000000000 -0400
-+++ linux-2.6.27-710/include/linux/skbuff.h.orig 1969-12-31 19:00:00.000000000 -0500
-@@ -1,1730 +0,0 @@
--/*
-- * Definitions for the 'struct sk_buff' memory handlers.
-- *
-- * Authors:
-- * Alan Cox, <gw4pts@gw4pts.ampr.org>
-- * Florian La Roche, <rzsfl@rz.uni-sb.de>
-- *
-- * This program is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License
-- * as published by the Free Software Foundation; either version
-- * 2 of the License, or (at your option) any later version.
-- */
--
--#ifndef _LINUX_SKBUFF_H
--#define _LINUX_SKBUFF_H
--
--#include <linux/kernel.h>
--#include <linux/compiler.h>
--#include <linux/time.h>
--#include <linux/cache.h>
--
--#include <asm/atomic.h>
--#include <asm/types.h>
--#include <linux/spinlock.h>
--#include <linux/net.h>
--#include <linux/textsearch.h>
--#include <net/checksum.h>
--#include <linux/rcupdate.h>
--#include <linux/dmaengine.h>
--#include <linux/hrtimer.h>
--
--#define HAVE_ALLOC_SKB /* For the drivers to know */
--#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
--
--/* Don't change this without changing skb_csum_unnecessary! */
--#define CHECKSUM_NONE 0
--#define CHECKSUM_UNNECESSARY 1
--#define CHECKSUM_COMPLETE 2
--#define CHECKSUM_PARTIAL 3
--
--#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \
-- ~(SMP_CACHE_BYTES - 1))
--#define SKB_WITH_OVERHEAD(X) \
-- ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
--#define SKB_MAX_ORDER(X, ORDER) \
-- SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
--#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
--#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
--
--/* A. Checksumming of received packets by device.
-- *
-- * NONE: device failed to checksum this packet.
-- * skb->csum is undefined.
-- *
-- * UNNECESSARY: device parsed the packet and claims to have verified
-- * the checksum. skb->csum is undefined.
-- * This is a bad option, but, unfortunately, many vendors do this.
-- * Apparently with the secret goal of selling you a new device when
-- * you add a new protocol to your host. E.g. IPv6. 8)
-- *
-- * COMPLETE: the most generic way. Device supplied checksum of _all_
-- * the packet as seen by netif_rx in skb->csum.
-- * NOTE: Even if device supports only some protocols, but
-- * is able to produce some skb->csum, it MUST use COMPLETE,
-- * not UNNECESSARY.
-- *
-- * PARTIAL: identical to the case for output below. This may occur
-- * on a packet received directly from another Linux OS, e.g.,
-- * a virtualised Linux kernel on the same host. The packet can
-- * be treated in the same way as UNNECESSARY except that on
-- * output (i.e., forwarding) the checksum must be filled in
-- * by the OS or the hardware.
-- *
-- * B. Checksumming on output.
-- *
-- * NONE: skb is checksummed by protocol or csum is not required.
-- *
-- * PARTIAL: device is required to csum packet as seen by hard_start_xmit
-- * from skb->csum_start to the end and to record the checksum
-- * at skb->csum_start + skb->csum_offset.
-- *
-- * Device must show its capabilities in dev->features, set
-- * at device setup time.
-- * NETIF_F_HW_CSUM - a clever device; it is able to checksum
-- * everything.
-- * NETIF_F_NO_CSUM - loopback or reliable single hop media.
-- * NETIF_F_IP_CSUM - the device is dumb; it is able to csum only
-- * TCP/UDP over IPv4. Sigh. Vendors like it this
-- * way for an unknown reason. Though, see the comment
-- * above about CHECKSUM_UNNECESSARY. 8)
-- * NETIF_F_IPV6_CSUM about as dumb as the last one but does IPv6 instead.
-- *
-- * Any questions? No questions, good. --ANK
-- */
--
--struct net_device;
--struct scatterlist;
--struct pipe_inode_info;
--
--#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
--struct nf_conntrack {
-- atomic_t use;
--};
--#endif
--
--#ifdef CONFIG_BRIDGE_NETFILTER
--struct nf_bridge_info {
-- atomic_t use;
-- struct net_device *physindev;
-- struct net_device *physoutdev;
-- unsigned int mask;
-- unsigned long data[32 / sizeof(unsigned long)];
--};
--#endif
--
--struct sk_buff_head {
-- /* These two members must be first. */
-- struct sk_buff *next;
-- struct sk_buff *prev;
--
-- __u32 qlen;
-- spinlock_t lock;
--};
--
--struct sk_buff;
--
--/* To allow 64K frame to be packed as single skb without frag_list */
--#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
--
--typedef struct skb_frag_struct skb_frag_t;
--
--struct skb_frag_struct {
-- struct page *page;
-- __u32 page_offset;
-- __u32 size;
--};
--
--/* This data is invariant across clones and lives at
-- * the end of the header data, i.e. at skb->end.
-- */
--struct skb_shared_info {
-- atomic_t dataref;
-- unsigned short nr_frags;
-- unsigned short gso_size;
-- /* Warning: this field is not always filled in (UFO)! */
-- unsigned short gso_segs;
-- unsigned short gso_type;
-- __be32 ip6_frag_id;
-- struct sk_buff *frag_list;
-- skb_frag_t frags[MAX_SKB_FRAGS];
--};
--
--/* We divide dataref into two halves. The higher 16 bits hold references
-- * to the payload part of skb->data. The lower 16 bits hold references to
-- * the entire skb->data. A clone of a headerless skb holds the length of
-- * the header in skb->hdr_len.
-- *
-- * All users must obey the rule that the skb->data reference count must be
-- * greater than or equal to the payload reference count.
-- *
-- * Holding a reference to the payload part means that the user does not
-- * care about modifications to the header part of skb->data.
-- */
--#define SKB_DATAREF_SHIFT 16
--#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
--
--
--enum {
-- SKB_FCLONE_UNAVAILABLE,
-- SKB_FCLONE_ORIG,
-- SKB_FCLONE_CLONE,
--};
--
--enum {
-- SKB_GSO_TCPV4 = 1 << 0,
-- SKB_GSO_UDP = 1 << 1,
--
-- /* This indicates the skb is from an untrusted source. */
-- SKB_GSO_DODGY = 1 << 2,
--
-- /* This indicates the tcp segment has CWR set. */
-- SKB_GSO_TCP_ECN = 1 << 3,
--
-- SKB_GSO_TCPV6 = 1 << 4,
--};
--
--#if BITS_PER_LONG > 32
--#define NET_SKBUFF_DATA_USES_OFFSET 1
--#endif
--
--#ifdef NET_SKBUFF_DATA_USES_OFFSET
--typedef unsigned int sk_buff_data_t;
--#else
--typedef unsigned char *sk_buff_data_t;
--#endif
--
--/**
-- * struct sk_buff - socket buffer
-- * @next: Next buffer in list
-- * @prev: Previous buffer in list
-- * @sk: Socket we are owned by
-- * @tstamp: Time we arrived
-- * @dev: Device we arrived on/are leaving by
-- * @transport_header: Transport layer header
-- * @network_header: Network layer header
-- * @mac_header: Link layer header
-- * @dst: destination entry
-- * @sp: the security path, used for xfrm
-- * @cb: Control buffer. Free for use by every layer. Put private vars here
-- * @len: Length of actual data
-- * @data_len: Data length
-- * @mac_len: Length of link layer header
-- * @hdr_len: writable header length of cloned skb
-- * @csum: Checksum (must include start/offset pair)
-- * @csum_start: Offset from skb->head where checksumming should start
-- * @csum_offset: Offset from csum_start where checksum should be stored
-- * @local_df: allow local fragmentation
-- * @cloned: Head may be cloned (check refcnt to be sure)
-- * @nohdr: Payload reference only, must not modify header
-- * @pkt_type: Packet class
-- * @fclone: skbuff clone status
-- * @ip_summed: Driver fed us an IP checksum
-- * @priority: Packet queueing priority
-- * @users: User count - see {datagram,tcp}.c
-- * @protocol: Packet protocol from driver
-- * @truesize: Buffer size
-- * @head: Head of buffer
-- * @data: Data head pointer
-- * @tail: Tail pointer
-- * @end: End pointer
-- * @destructor: Destruct function
-- * @mark: Generic packet mark
-- * @nfct: Associated connection, if any
-- * @ipvs_property: skbuff is owned by ipvs
-- * @peeked: this packet has been seen already, so stats have been
-- * done for it, don't do them again
-- * @nf_trace: netfilter packet trace flag
-- * @nfctinfo: Relationship of this skb to the connection
-- * @nfct_reasm: netfilter conntrack re-assembly pointer
-- * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
-- * @iif: ifindex of device we arrived on
-- * @queue_mapping: Queue mapping for multiqueue devices
-- * @tc_index: Traffic control index
-- * @tc_verd: traffic control verdict
-- * @ndisc_nodetype: router type (from link layer)
-- * @do_not_encrypt: set to prevent encryption of this frame
-- * @dma_cookie: a cookie to one of several possible DMA operations
-- * done by skb DMA functions
-- * @secmark: security marking
-- * @vlan_tci: vlan tag control information
-- */
--
--struct sk_buff {
-- /* These two members must be first. */
-- struct sk_buff *next;
-- struct sk_buff *prev;
--
-- struct sock *sk;
-- ktime_t tstamp;
-- struct net_device *dev;
--
-- union {
-- struct dst_entry *dst;
-- struct rtable *rtable;
-- };
-- struct sec_path *sp;
--
-- /*
-- * This is the control buffer. It is free to use for every
-- * layer. Please put your private variables there. If you
-- * want to keep them across layers you have to do a skb_clone()
-- * first. This is owned by whoever has the skb queued ATM.
-- */
-- char cb[48];
--
-- unsigned int len,
-- data_len;
-- __u16 mac_len,
-- hdr_len;
-- union {
-- __wsum csum;
-- struct {
-- __u16 csum_start;
-- __u16 csum_offset;
-- };
-- };
-- __u32 priority;
-- __u8 local_df:1,
-- cloned:1,
-- ip_summed:2,
-- nohdr:1,
-- nfctinfo:3;
-- __u8 pkt_type:3,
-- fclone:2,
-- ipvs_property:1,
-- peeked:1,
-- nf_trace:1;
-- __be16 protocol;
--
-- void (*destructor)(struct sk_buff *skb);
--#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-- struct nf_conntrack *nfct;
-- struct sk_buff *nfct_reasm;
--#endif
--#ifdef CONFIG_BRIDGE_NETFILTER
-- struct nf_bridge_info *nf_bridge;
--#endif
--
-- int iif;
-- __u16 queue_mapping;
--#ifdef CONFIG_NET_SCHED
-- __u16 tc_index; /* traffic control index */
--#ifdef CONFIG_NET_CLS_ACT
-- __u16 tc_verd; /* traffic control verdict */
--#endif
--#endif
--#ifdef CONFIG_IPV6_NDISC_NODETYPE
-- __u8 ndisc_nodetype:2;
--#endif
--#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
-- __u8 do_not_encrypt:1;
--#endif
-- /* 0/13/14 bit hole */
--
--#ifdef CONFIG_NET_DMA
-- dma_cookie_t dma_cookie;
--#endif
--#ifdef CONFIG_NETWORK_SECMARK
-- __u32 secmark;
--#endif
--
-- __u32 mark;
--#define skb_tag mark
--
-- __u16 vlan_tci;
--
-- sk_buff_data_t transport_header;
-- sk_buff_data_t network_header;
-- sk_buff_data_t mac_header;
-- /* These elements must be at the end, see alloc_skb() for details. */
-- sk_buff_data_t tail;
-- sk_buff_data_t end;
-- unsigned char *head,
-- *data;
-- unsigned int truesize;
-- atomic_t users;
--};
--
--#ifdef __KERNEL__
--/*
-- * Handling routines are only of interest to the kernel
-- */
--#include <linux/slab.h>
--
--#include <asm/system.h>
--
--extern void kfree_skb(struct sk_buff *skb);
--extern void __kfree_skb(struct sk_buff *skb);
--extern struct sk_buff *__alloc_skb(unsigned int size,
-- gfp_t priority, int fclone, int node);
--static inline struct sk_buff *alloc_skb(unsigned int size,
-- gfp_t priority)
--{
-- return __alloc_skb(size, priority, 0, -1);
--}
--
--static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
-- gfp_t priority)
--{
-- return __alloc_skb(size, priority, 1, -1);
--}
--
--extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
--extern struct sk_buff *skb_clone(struct sk_buff *skb,
-- gfp_t priority);
--extern struct sk_buff *skb_copy(const struct sk_buff *skb,
-- gfp_t priority);
--extern struct sk_buff *pskb_copy(struct sk_buff *skb,
-- gfp_t gfp_mask);
--extern int pskb_expand_head(struct sk_buff *skb,
-- int nhead, int ntail,
-- gfp_t gfp_mask);
--extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
-- unsigned int headroom);
--extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-- int newheadroom, int newtailroom,
-- gfp_t priority);
--extern int skb_to_sgvec(struct sk_buff *skb,
-- struct scatterlist *sg, int offset,
-- int len);
--extern int skb_cow_data(struct sk_buff *skb, int tailbits,
-- struct sk_buff **trailer);
--extern int skb_pad(struct sk_buff *skb, int pad);
--#define dev_kfree_skb(a) kfree_skb(a)
--extern void skb_over_panic(struct sk_buff *skb, int len,
-- void *here);
--extern void skb_under_panic(struct sk_buff *skb, int len,
-- void *here);
--extern void skb_truesize_bug(struct sk_buff *skb);
--
--static inline void skb_truesize_check(struct sk_buff *skb)
--{
-- int len = sizeof(struct sk_buff) + skb->len;
--
-- if (unlikely((int)skb->truesize < len))
-- skb_truesize_bug(skb);
--}
--
--extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
-- int getfrag(void *from, char *to, int offset,
-- int len,int odd, struct sk_buff *skb),
-- void *from, int length);
--
--struct skb_seq_state
--{
-- __u32 lower_offset;
-- __u32 upper_offset;
-- __u32 frag_idx;
-- __u32 stepped_offset;
-- struct sk_buff *root_skb;
-- struct sk_buff *cur_skb;
-- __u8 *frag_data;
--};
--
--extern void skb_prepare_seq_read(struct sk_buff *skb,
-- unsigned int from, unsigned int to,
-- struct skb_seq_state *st);
--extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
-- struct skb_seq_state *st);
--extern void skb_abort_seq_read(struct skb_seq_state *st);
--
--extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
-- unsigned int to, struct ts_config *config,
-- struct ts_state *state);
--
--#ifdef NET_SKBUFF_DATA_USES_OFFSET
--static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
--{
-- return skb->head + skb->end;
--}
--#else
--static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
--{
-- return skb->end;
--}
--#endif
--
--/* Internal */
--#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
--
--/**
-- * skb_queue_empty - check if a queue is empty
-- * @list: queue head
-- *
-- * Returns true if the queue is empty, false otherwise.
-- */
--static inline int skb_queue_empty(const struct sk_buff_head *list)
--{
-- return list->next == (struct sk_buff *)list;
--}
--
--/**
-- * skb_get - reference buffer
-- * @skb: buffer to reference
-- *
-- * Makes another reference to a socket buffer and returns a pointer
-- * to the buffer.
-- */
--static inline struct sk_buff *skb_get(struct sk_buff *skb)
--{
-- atomic_inc(&skb->users);
-- return skb;
--}
--
--/*
-- * If users == 1, we are the only owner and can avoid redundant
-- * atomic changes.
-- */
--
--/**
-- * skb_cloned - is the buffer a clone
-- * @skb: buffer to check
-- *
-- * Returns true if the buffer was generated with skb_clone() and is
-- * one of multiple shared copies of the buffer. Cloned buffers are
-- * shared data so must not be written to under normal circumstances.
-- */
--static inline int skb_cloned(const struct sk_buff *skb)
--{
-- return skb->cloned &&
-- (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
--}
--
--/**
-- * skb_header_cloned - is the header a clone
-- * @skb: buffer to check
-- *
-- * Returns true if modifying the header part of the buffer requires
-- * the data to be copied.
-- */
--static inline int skb_header_cloned(const struct sk_buff *skb)
--{
-- int dataref;
--
-- if (!skb->cloned)
-- return 0;
--
-- dataref = atomic_read(&skb_shinfo(skb)->dataref);
-- dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
-- return dataref != 1;
--}
--
--/**
-- * skb_header_release - release reference to header
-- * @skb: buffer to operate on
-- *
-- * Drop a reference to the header part of the buffer. This is done
-- * by acquiring a payload reference. You must not read from the header
-- * part of skb->data after this.
-- */
--static inline void skb_header_release(struct sk_buff *skb)
--{
-- BUG_ON(skb->nohdr);
-- skb->nohdr = 1;
-- atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
--}
--
--/**
-- * skb_shared - is the buffer shared
-- * @skb: buffer to check
-- *
-- * Returns true if more than one person has a reference to this
-- * buffer.
-- */
--static inline int skb_shared(const struct sk_buff *skb)
--{
-- return atomic_read(&skb->users) != 1;
--}
--
--/**
-- * skb_share_check - check if buffer is shared and if so clone it
-- * @skb: buffer to check
-- * @pri: priority for memory allocation
-- *
-- * If the buffer is shared the buffer is cloned and the old copy
-- * drops a reference. A new clone with a single reference is returned.
-- * If the buffer is not shared the original buffer is returned. When
-- * being called from interrupt status or with spinlocks held pri must
-- * be GFP_ATOMIC.
-- *
-- * NULL is returned on a memory allocation failure.
-- */
--static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
-- gfp_t pri)
--{
-- might_sleep_if(pri & __GFP_WAIT);
-- if (skb_shared(skb)) {
-- struct sk_buff *nskb = skb_clone(skb, pri);
-- kfree_skb(skb);
-- skb = nskb;
-- }
-- return skb;
--}
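
skb_share_check() above is the clone-if-shared half of the buffer COW scheme: if anyone else holds a reference, take a private clone and drop our reference to the shared original. A hedged userspace model of the same control flow (struct buf and its helpers are invented stand-ins):

/* Sketch of the clone-if-shared pattern behind skb_share_check(). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
    atomic_int users;
    char data[32];
};

static struct buf *buf_clone(const struct buf *b)
{
    struct buf *n = malloc(sizeof(*n));
    if (n) {
        atomic_init(&n->users, 1);
        memcpy(n->data, b->data, sizeof(n->data));
    }
    return n;
}

static void buf_put(struct buf *b)
{
    if (atomic_fetch_sub(&b->users, 1) == 1)
        free(b);
}

static struct buf *buf_share_check(struct buf *b)
{
    if (atomic_load(&b->users) != 1) {   /* shared: copy, drop our ref */
        struct buf *n = buf_clone(b);
        buf_put(b);
        return n;                        /* may be NULL on alloc failure */
    }
    return b;
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b));
    atomic_init(&b->users, 2);           /* pretend a second owner exists */
    strcpy(b->data, "hello");
    struct buf *mine = buf_share_check(b);
    printf("got a %s\n", mine == b ? "shared buffer" : "private copy");
    buf_put(mine);                       /* our reference */
    buf_put(b);                          /* the other owner's reference */
    return 0;
}
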
--
--/*
-- * Copy shared buffers into a new sk_buff. We effectively do COW on
-- * packets to handle cases where we have both a local reader and a
-- * forwarding path, and a couple of other messy ones. The normal one
-- * is tcpdumping a packet that's being forwarded.
-- */
--
--/**
-- * skb_unshare - make a copy of a shared buffer
-- * @skb: buffer to check
-- * @pri: priority for memory allocation
-- *
-- * If the socket buffer is a clone then this function creates a new
-- * copy of the data, drops a reference count on the old copy and returns
-- * the new copy with the reference count at 1. If the buffer is not a clone
-- * the original buffer is returned. When called with a spinlock held or
-- * from interrupt context, @pri must be %GFP_ATOMIC.
-- *
-- * %NULL is returned on a memory allocation failure.
-- */
--static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
-- gfp_t pri)
--{
-- might_sleep_if(pri & __GFP_WAIT);
-- if (skb_cloned(skb)) {
-- struct sk_buff *nskb = skb_copy(skb, pri);
-- kfree_skb(skb); /* Free our shared copy */
-- skb = nskb;
-- }
-- return skb;
--}
--
--/**
-- * skb_peek
-- * @list_: list to peek at
-- *
-- * Peek an &sk_buff. Unlike most other operations you _MUST_
-- * be careful with this one. A peek leaves the buffer on the
-- * list and someone else may run off with it. You must hold
-- * the appropriate locks or have a private queue to do this.
-- *
-- * Returns %NULL for an empty list or a pointer to the head element.
-- * The reference count is not incremented and the reference is therefore
-- * volatile. Use with caution.
-- */
--static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
--{
-- struct sk_buff *list = ((struct sk_buff *)list_)->next;
-- if (list == (struct sk_buff *)list_)
-- list = NULL;
-- return list;
--}
--
--/**
-- * skb_peek_tail
-- * @list_: list to peek at
-- *
-- * Peek an &sk_buff. Unlike most other operations you _MUST_
-- * be careful with this one. A peek leaves the buffer on the
-- * list and someone else may run off with it. You must hold
-- * the appropriate locks or have a private queue to do this.
-- *
-- * Returns %NULL for an empty list or a pointer to the tail element.
-- * The reference count is not incremented and the reference is therefore
-- * volatile. Use with caution.
-- */
--static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
--{
-- struct sk_buff *list = ((struct sk_buff *)list_)->prev;
-- if (list == (struct sk_buff *)list_)
-- list = NULL;
-- return list;
--}
--
--/**
-- * skb_queue_len - get queue length
-- * @list_: list to measure
-- *
-- * Return the length of an &sk_buff queue.
-- */
--static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
--{
-- return list_->qlen;
--}
--
--/*
-- * This function creates a split out lock class for each invocation;
-- * this is needed for now since a whole lot of users of the skb-queue
-- * infrastructure in drivers have different locking usage (in hardirq)
-- * than the networking core (in softirq only). In the long run either the
-- * network layer or the drivers will need annotation to consolidate the
-- * main types of usage into 3 classes.
-- */
--static inline void skb_queue_head_init(struct sk_buff_head *list)
--{
-- spin_lock_init(&list->lock);
-- list->prev = list->next = (struct sk_buff *)list;
-- list->qlen = 0;
--}
--
--static inline void skb_queue_head_init_class(struct sk_buff_head *list,
-- struct lock_class_key *class)
--{
-- skb_queue_head_init(list);
-- lockdep_set_class(&list->lock, class);
--}
--
--/*
-- * Insert an sk_buff on a list.
-- *
-- * The "__skb_xxxx()" functions are the non-atomic ones that
-- * can only be called with interrupts disabled.
-- */
--extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
--static inline void __skb_insert(struct sk_buff *newsk,
-- struct sk_buff *prev, struct sk_buff *next,
-- struct sk_buff_head *list)
--{
-- newsk->next = next;
-- newsk->prev = prev;
-- next->prev = prev->next = newsk;
-- list->qlen++;
--}
--
--/**
-- * __skb_queue_after - queue a buffer after the given buffer
-- * @list: list to use
-- * @prev: place after this buffer
-- * @newsk: buffer to queue
-- *
-- * Queue a buffer in the middle of a list. This function takes no locks
-- * and you must therefore hold required locks before calling it.
-- *
-- * A buffer cannot be placed on two lists at the same time.
-- */
--static inline void __skb_queue_after(struct sk_buff_head *list,
-- struct sk_buff *prev,
-- struct sk_buff *newsk)
--{
-- __skb_insert(newsk, prev, prev->next, list);
--}
--
--extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
-- struct sk_buff_head *list);
--
--static inline void __skb_queue_before(struct sk_buff_head *list,
-- struct sk_buff *next,
-- struct sk_buff *newsk)
--{
-- __skb_insert(newsk, next->prev, next, list);
--}
--
--/**
-- * __skb_queue_head - queue a buffer at the list head
-- * @list: list to use
-- * @newsk: buffer to queue
-- *
-- * Queue a buffer at the start of a list. This function takes no locks
-- * and you must therefore hold required locks before calling it.
-- *
-- * A buffer cannot be placed on two lists at the same time.
-- */
--extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
--static inline void __skb_queue_head(struct sk_buff_head *list,
-- struct sk_buff *newsk)
--{
-- __skb_queue_after(list, (struct sk_buff *)list, newsk);
--}
--
--/**
-- * __skb_queue_tail - queue a buffer at the list tail
-- * @list: list to use
-- * @newsk: buffer to queue
-- *
-- * Queue a buffer at the end of a list. This function takes no locks
-- * and you must therefore hold required locks before calling it.
-- *
-- * A buffer cannot be placed on two lists at the same time.
-- */
--extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
--static inline void __skb_queue_tail(struct sk_buff_head *list,
-- struct sk_buff *newsk)
--{
-- __skb_queue_before(list, (struct sk_buff *)list, newsk);
--}
--
--/*
-- * remove sk_buff from list. _Must_ be called atomically, and with
-- * the list known.
-- */
--extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
--static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
--{
-- struct sk_buff *next, *prev;
--
-- list->qlen--;
-- next = skb->next;
-- prev = skb->prev;
-- skb->next = skb->prev = NULL;
-- next->prev = prev;
-- prev->next = next;
--}
--
--/**
-- * __skb_dequeue - remove from the head of the queue
-- * @list: list to dequeue from
-- *
-- * Remove the head of the list. This function does not take any locks
-- * so must be used with appropriate locks held only. The head item is
-- * returned or %NULL if the list is empty.
-- */
--extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
--static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
--{
-- struct sk_buff *skb = skb_peek(list);
-- if (skb)
-- __skb_unlink(skb, list);
-- return skb;
--}
--
--/**
-- * __skb_dequeue_tail - remove from the tail of the queue
-- * @list: list to dequeue from
-- *
-- * Remove the tail of the list. This function does not take any locks
-- * so must be used with appropriate locks held only. The tail item is
-- * returned or %NULL if the list is empty.
-- */
--extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
--static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
--{
-- struct sk_buff *skb = skb_peek_tail(list);
-- if (skb)
-- __skb_unlink(skb, list);
-- return skb;
--}
--
--
--static inline int skb_is_nonlinear(const struct sk_buff *skb)
--{
-- return skb->data_len;
--}
--
--static inline unsigned int skb_headlen(const struct sk_buff *skb)
--{
-- return skb->len - skb->data_len;
--}
--
--static inline int skb_pagelen(const struct sk_buff *skb)
--{
-- int i, len = 0;
--
-- for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
-- len += skb_shinfo(skb)->frags[i].size;
-- return len + skb_headlen(skb);
--}
--
--static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
-- struct page *page, int off, int size)
--{
-- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
--
-- frag->page = page;
-- frag->page_offset = off;
-- frag->size = size;
-- skb_shinfo(skb)->nr_frags = i + 1;
--}
--
--#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
--#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list)
--#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
--
--#ifdef NET_SKBUFF_DATA_USES_OFFSET
--static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
--{
-- return skb->head + skb->tail;
--}
--
--static inline void skb_reset_tail_pointer(struct sk_buff *skb)
--{
-- skb->tail = skb->data - skb->head;
--}
--
--static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
--{
-- skb_reset_tail_pointer(skb);
-- skb->tail += offset;
--}
--#else /* NET_SKBUFF_DATA_USES_OFFSET */
--static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
--{
-- return skb->tail;
--}
--
--static inline void skb_reset_tail_pointer(struct sk_buff *skb)
--{
-- skb->tail = skb->data;
--}
--
--static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
--{
-- skb->tail = skb->data + offset;
--}
--
--#endif /* NET_SKBUFF_DATA_USES_OFFSET */
--
--/*
-- * Add data to an sk_buff
-- */
--extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
--static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
--{
-- unsigned char *tmp = skb_tail_pointer(skb);
-- SKB_LINEAR_ASSERT(skb);
-- skb->tail += len;
-- skb->len += len;
-- return tmp;
--}
--
--extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
--static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
--{
-- skb->data -= len;
-- skb->len += len;
-- return skb->data;
--}
--
--extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
--static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
--{
-- skb->len -= len;
-- BUG_ON(skb->len < skb->data_len);
-- return skb->data += len;
--}
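
__skb_put(), __skb_push() and __skb_pull() above are all pointer arithmetic on one flat buffer: put grows the packet at the tail, push grows it at the head into reserved headroom, and pull consumes bytes from the head. A toy model under those assumptions (the pkt_* names are illustrative and carry no bounds checks):

/* Sketch: data and tail move within [head, end of buf]. */
#include <stdio.h>
#include <string.h>

struct pkt {
    unsigned char buf[64];
    unsigned char *head, *data, *tail;   /* head <= data <= tail */
};

static void pkt_init(struct pkt *p, int headroom)
{
    p->head = p->buf;
    p->data = p->tail = p->buf + headroom;   /* like skb_reserve() */
}

static unsigned char *pkt_put(struct pkt *p, int len)   /* append */
{
    unsigned char *old = p->tail;
    p->tail += len;
    return old;
}

static unsigned char *pkt_push(struct pkt *p, int len)  /* prepend */
{
    return p->data -= len;
}

static unsigned char *pkt_pull(struct pkt *p, int len)  /* consume */
{
    return p->data += len;
}

int main(void)
{
    struct pkt p;
    pkt_init(&p, 16);
    memcpy(pkt_put(&p, 7), "payload", 7);
    memcpy(pkt_push(&p, 4), "hdr:", 4);
    printf("%.*s\n", (int)(p.tail - p.data), p.data);  /* hdr:payload */
    pkt_pull(&p, 4);                                   /* strip header */
    printf("%.*s\n", (int)(p.tail - p.data), p.data);  /* payload */
    return 0;
}
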
--
--extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
--
--static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
--{
-- if (len > skb_headlen(skb) &&
-- !__pskb_pull_tail(skb, len - skb_headlen(skb)))
-- return NULL;
-- skb->len -= len;
-- return skb->data += len;
--}
--
--static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
--{
-- return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
--}
--
--static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
--{
-- if (likely(len <= skb_headlen(skb)))
-- return 1;
-- if (unlikely(len > skb->len))
-- return 0;
-- return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
--}
--
--/**
-- * skb_headroom - bytes at buffer head
-- * @skb: buffer to check
-- *
-- * Return the number of bytes of free space at the head of an &sk_buff.
-- */
--static inline unsigned int skb_headroom(const struct sk_buff *skb)
--{
-- return skb->data - skb->head;
--}
--
--/**
-- * skb_tailroom - bytes at buffer end
-- * @skb: buffer to check
-- *
-- * Return the number of bytes of free space at the tail of an sk_buff
-- */
--static inline int skb_tailroom(const struct sk_buff *skb)
--{
-- return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
--}
--
--/**
-- * skb_reserve - adjust headroom
-- * @skb: buffer to alter
-- * @len: bytes to move
-- *
-- * Increase the headroom of an empty &sk_buff by reducing the tail
-- * room. This is only allowed for an empty buffer.
-- */
--static inline void skb_reserve(struct sk_buff *skb, int len)
--{
-- skb->data += len;
-- skb->tail += len;
--}
--
--#ifdef NET_SKBUFF_DATA_USES_OFFSET
--static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
--{
-- return skb->head + skb->transport_header;
--}
--
--static inline void skb_reset_transport_header(struct sk_buff *skb)
--{
-- skb->transport_header = skb->data - skb->head;
--}
--
--static inline void skb_set_transport_header(struct sk_buff *skb,
-- const int offset)
--{
-- skb_reset_transport_header(skb);
-- skb->transport_header += offset;
--}
--
--static inline unsigned char *skb_network_header(const struct sk_buff *skb)
--{
-- return skb->head + skb->network_header;
--}
--
--static inline void skb_reset_network_header(struct sk_buff *skb)
--{
-- skb->network_header = skb->data - skb->head;
--}
--
--static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
--{
-- skb_reset_network_header(skb);
-- skb->network_header += offset;
--}
--
--static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
--{
-- return skb->head + skb->mac_header;
--}
--
--static inline int skb_mac_header_was_set(const struct sk_buff *skb)
--{
-- return skb->mac_header != ~0U;
--}
--
--static inline void skb_reset_mac_header(struct sk_buff *skb)
--{
-- skb->mac_header = skb->data - skb->head;
--}
--
--static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
--{
-- skb_reset_mac_header(skb);
-- skb->mac_header += offset;
--}
--
--#else /* NET_SKBUFF_DATA_USES_OFFSET */
--
--static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
--{
-- return skb->transport_header;
--}
--
--static inline void skb_reset_transport_header(struct sk_buff *skb)
--{
-- skb->transport_header = skb->data;
--}
--
--static inline void skb_set_transport_header(struct sk_buff *skb,
-- const int offset)
--{
-- skb->transport_header = skb->data + offset;
--}
--
--static inline unsigned char *skb_network_header(const struct sk_buff *skb)
--{
-- return skb->network_header;
--}
--
--static inline void skb_reset_network_header(struct sk_buff *skb)
--{
-- skb->network_header = skb->data;
--}
--
--static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
--{
-- skb->network_header = skb->data + offset;
--}
--
--static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
--{
-- return skb->mac_header;
--}
--
--static inline int skb_mac_header_was_set(const struct sk_buff *skb)
--{
-- return skb->mac_header != NULL;
--}
--
--static inline void skb_reset_mac_header(struct sk_buff *skb)
--{
-- skb->mac_header = skb->data;
--}
--
--static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
--{
-- skb->mac_header = skb->data + offset;
--}
--#endif /* NET_SKBUFF_DATA_USES_OFFSET */
--
--static inline int skb_transport_offset(const struct sk_buff *skb)
--{
-- return skb_transport_header(skb) - skb->data;
--}
--
--static inline u32 skb_network_header_len(const struct sk_buff *skb)
--{
-- return skb->transport_header - skb->network_header;
--}
--
--static inline int skb_network_offset(const struct sk_buff *skb)
--{
-- return skb_network_header(skb) - skb->data;
--}
--
--/*
-- * CPUs often take a performance hit when accessing unaligned memory
-- * locations. The actual performance hit varies, it can be small if the
-- * hardware handles it or large if we have to take an exception and fix it
-- * in software.
-- *
-- * Since an ethernet header is 14 bytes network drivers often end up with
-- * the IP header at an unaligned offset. The IP header can be aligned by
-- * shifting the start of the packet by 2 bytes. Drivers should do this
-- * with:
-- *
-- * skb_reserve(skb, NET_IP_ALIGN);
-- *
-- * The downside to this alignment of the IP header is that the DMA is now
-- * unaligned. On some architectures the cost of an unaligned DMA is high
-- * and this cost outweighs the gains made by aligning the IP header.
-- *
-- * Since this trade off varies between architectures, we allow NET_IP_ALIGN
-- * to be overridden.
-- */
--#ifndef NET_IP_ALIGN
--#define NET_IP_ALIGN 2
--#endif
--
--/*
-- * The networking layer reserves some headroom in skb data (via
-- * dev_alloc_skb). This is used to avoid having to reallocate skb data when
-- * the header has to grow. In the default case, if the header has to grow
-- * 16 bytes or less we avoid the reallocation.
-- *
-- * Unfortunately this headroom changes the DMA alignment of the resulting
-- * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
-- * on some architectures. An architecture can override this value,
-- * perhaps setting it to a cacheline in size (since that will maintain
-- * cacheline alignment of the DMA). It must be a power of 2.
-- *
-- * Various parts of the networking layer expect at least 16 bytes of
-- * headroom, you should not reduce this.
-- */
--#ifndef NET_SKB_PAD
--#define NET_SKB_PAD 16
--#endif
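
Both NET_IP_ALIGN and NET_SKB_PAD above exist to manage alignment trade-offs, and the header's SKB_DATA_ALIGN() macro rounds sizes up to a power-of-two boundary with the usual add-then-mask trick. A quick standalone check of that arithmetic (ALIGN_UP is an illustrative rename):

/* Sketch: round x up to a multiple of a, where a is a power of two. */
#include <assert.h>
#include <stddef.h>

#define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((size_t)(a) - 1))

int main(void)
{
    assert(ALIGN_UP(0, 64)  == 0);
    assert(ALIGN_UP(1, 64)  == 64);
    assert(ALIGN_UP(64, 64) == 64);
    assert(ALIGN_UP(65, 64) == 128);
    return 0;
}
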
--
--extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
--
--static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
--{
-- if (unlikely(skb->data_len)) {
-- WARN_ON(1);
-- return;
-- }
-- skb->len = len;
-- skb_set_tail_pointer(skb, len);
--}
--
--extern void skb_trim(struct sk_buff *skb, unsigned int len);
--
--static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
--{
-- if (skb->data_len)
-- return ___pskb_trim(skb, len);
-- __skb_trim(skb, len);
-- return 0;
--}
--
--static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
--{
-- return (len < skb->len) ? __pskb_trim(skb, len) : 0;
--}
--
--/**
-- * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
-- * @skb: buffer to alter
-- * @len: new length
-- *
-- * This is identical to pskb_trim except that the caller knows that
-- * the skb is not cloned so we should never get an error due to out-
-- * of-memory.
-- */
--static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
--{
-- int err = pskb_trim(skb, len);
-- BUG_ON(err);
--}
--
--/**
-- * skb_orphan - orphan a buffer
-- * @skb: buffer to orphan
-- *
-- * If a buffer currently has an owner then we call the owner's
-- * destructor function and make the @skb unowned. The buffer continues
-- * to exist but is no longer charged to its former owner.
-- */
--static inline void skb_orphan(struct sk_buff *skb)
--{
-- if (skb->destructor)
-- skb->destructor(skb);
-- skb->destructor = NULL;
-- skb->sk = NULL;
--}
--
--/**
-- * __skb_queue_purge - empty a list
-- * @list: list to empty
-- *
-- * Delete all buffers on an &sk_buff list. Each buffer is removed from
-- * the list and one reference dropped. This function does not take the
-- * list lock and the caller must hold the relevant locks to use it.
-- */
--extern void skb_queue_purge(struct sk_buff_head *list);
--static inline void __skb_queue_purge(struct sk_buff_head *list)
--{
-- struct sk_buff *skb;
-- while ((skb = __skb_dequeue(list)) != NULL)
-- kfree_skb(skb);
--}
--
--/**
-- * __dev_alloc_skb - allocate an skbuff for receiving
-- * @length: length to allocate
-- * @gfp_mask: get_free_pages mask, passed to alloc_skb
-- *
-- * Allocate a new &sk_buff and assign it a usage count of one. The
-- * buffer has unspecified headroom built in. Users should allocate
-- * the headroom they think they need without accounting for the
-- * built in space. The built in space is used for optimisations.
-- *
-- * %NULL is returned if there is no free memory.
-- */
--static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
-- gfp_t gfp_mask)
--{
-- struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
-- if (likely(skb))
-- skb_reserve(skb, NET_SKB_PAD);
-- return skb;
--}
--
--extern struct sk_buff *dev_alloc_skb(unsigned int length);
--
--extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-- unsigned int length, gfp_t gfp_mask);
--
--/**
-- * netdev_alloc_skb - allocate an skbuff for rx on a specific device
-- * @dev: network device to receive on
-- * @length: length to allocate
-- *
-- * Allocate a new &sk_buff and assign it a usage count of one. The
-- * buffer has unspecified headroom built in. Users should allocate
-- * the headroom they think they need without accounting for the
-- * built in space. The built in space is used for optimisations.
-- *
-- * %NULL is returned if there is no free memory. Although this function
-- * allocates memory it can be called from an interrupt.
-- */
--static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
-- unsigned int length)
--{
-- return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
--}
--
--/**
-- * skb_clone_writable - is the header of a clone writable
-- * @skb: buffer to check
-- * @len: length up to which to write
-- *
-- * Returns true if modifying the header part of the cloned buffer
-- * does not require the data to be copied.
-- */
--static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
--{
-- return !skb_header_cloned(skb) &&
-- skb_headroom(skb) + len <= skb->hdr_len;
--}
--
--static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
-- int cloned)
--{
-- int delta = 0;
--
-- if (headroom < NET_SKB_PAD)
-- headroom = NET_SKB_PAD;
-- if (headroom > skb_headroom(skb))
-- delta = headroom - skb_headroom(skb);
--
-- if (delta || cloned)
-- return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
-- GFP_ATOMIC);
-- return 0;
--}
--
--/**
-- * skb_cow - copy header of skb when it is required
-- * @skb: buffer to cow
-- * @headroom: needed headroom
-- *
-- * If the skb passed lacks sufficient headroom or its data part
-- * is shared, data is reallocated. If reallocation fails, an error
-- * is returned and original skb is not changed.
-- *
-- * The result is skb with writable area skb->head...skb->tail
-- * and at least @headroom of space at head.
-- */
--static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
--{
-- return __skb_cow(skb, headroom, skb_cloned(skb));
--}
--
--/**
-- * skb_cow_head - skb_cow but only making the head writable
-- * @skb: buffer to cow
-- * @headroom: needed headroom
-- *
-- * This function is identical to skb_cow except that we replace the
-- * skb_cloned check by skb_header_cloned. It should be used when
-- * you only need to push on some header and do not need to modify
-- * the data.
-- */
--static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
--{
-- return __skb_cow(skb, headroom, skb_header_cloned(skb));
--}
--
--/**
-- * skb_padto - pad an skbuff up to a minimal size
-- * @skb: buffer to pad
-- * @len: minimal length
-- *
-- * Pads up a buffer to ensure the trailing bytes exist and are
-- * blanked. If the buffer already contains sufficient data it
-- * is untouched. Otherwise it is extended. Returns zero on
-- * success. The skb is freed on error.
-- */
--
--static inline int skb_padto(struct sk_buff *skb, unsigned int len)
--{
-- unsigned int size = skb->len;
-- if (likely(size >= len))
-- return 0;
-- return skb_pad(skb, len - size);
--}
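
Drivers typically call skb_padto() at the top of their transmit routine to satisfy a hardware minimum frame length. The error convention deserves emphasis: on failure the skb has already been freed, so the caller must not free it again. A hedged sketch against the usual Ethernet minimum:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: pad short frames before handing them to the hardware. */
static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        unsigned int len;

        if (skb_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;    /* skb already freed; do not free again */
        len = max_t(unsigned int, skb->len, ETH_ZLEN);
        /* ... program the hardware with `len' bytes at skb->data ... */
        return NETDEV_TX_OK;
}
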
--
--static inline int skb_add_data(struct sk_buff *skb,
-- char __user *from, int copy)
--{
-- const int off = skb->len;
--
-- if (skb->ip_summed == CHECKSUM_NONE) {
-- int err = 0;
-- __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
-- copy, 0, &err);
-- if (!err) {
-- skb->csum = csum_block_add(skb->csum, csum, off);
-- return 0;
-- }
-- } else if (!copy_from_user(skb_put(skb, copy), from, copy))
-- return 0;
--
-- __skb_trim(skb, off);
-- return -EFAULT;
--}
--
--static inline int skb_can_coalesce(struct sk_buff *skb, int i,
-- struct page *page, int off)
--{
-- if (i) {
-- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
--
-- return page == frag->page &&
-- off == frag->page_offset + frag->size;
-- }
-- return 0;
--}
--
--static inline int __skb_linearize(struct sk_buff *skb)
--{
-- return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
--}
--
--/**
-- * skb_linearize - convert paged skb to linear one
-- * @skb: buffer to linearize
-- *
-- * If there is no free memory -ENOMEM is returned, otherwise zero
-- * is returned and the old skb data released.
-- */
--static inline int skb_linearize(struct sk_buff *skb)
--{
-- return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
--}
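
Anything that wants to parse the whole packet through plain pointer arithmetic on skb->data must linearize first, since paged fragments are not reachable that way. A hedged sketch (inspect_bytes() is a hypothetical helper):

/* Sketch: pull all fragments into the linear area before scanning. */
static int example_inspect(struct sk_buff *skb)
{
        if (skb_linearize(skb))
                return -ENOMEM;         /* no memory for the linear copy */
        return inspect_bytes(skb->data, skb->len);      /* hypothetical */
}
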
--
--/**
-- * skb_linearize_cow - make sure skb is linear and writable
-- * @skb: buffer to process
-- *
-- * If there is no free memory -ENOMEM is returned, otherwise zero
-- * is returned and the old skb data released.
-- */
--static inline int skb_linearize_cow(struct sk_buff *skb)
--{
-- return skb_is_nonlinear(skb) || skb_cloned(skb) ?
-- __skb_linearize(skb) : 0;
--}
--
--/**
-- * skb_postpull_rcsum - update checksum for received skb after pull
-- * @skb: buffer to update
-- * @start: start of data before pull
-- * @len: length of data pulled
-- *
-- * After doing a pull on a received packet, you need to call this to
-- * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
-- * CHECKSUM_NONE so that it can be recomputed from scratch.
-- */
--
--static inline void skb_postpull_rcsum(struct sk_buff *skb,
-- const void *start, unsigned int len)
--{
-- if (skb->ip_summed == CHECKSUM_COMPLETE)
-- skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
--}
--
--unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
--
--/**
-- * pskb_trim_rcsum - trim received skb and update checksum
-- * @skb: buffer to trim
-- * @len: new length
-- *
-- * This is exactly the same as pskb_trim except that it ensures the
-- * checksums of received packets are still valid after the operation.
-- */
--
--static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
--{
-- if (likely(len >= skb->len))
-- return 0;
-- if (skb->ip_summed == CHECKSUM_COMPLETE)
-- skb->ip_summed = CHECKSUM_NONE;
-- return __pskb_trim(skb, len);
--}
--
--#define skb_queue_walk(queue, skb) \
-- for (skb = (queue)->next; \
-- prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
-- skb = skb->next)
--
--#define skb_queue_walk_safe(queue, skb, tmp) \
-- for (skb = (queue)->next, tmp = skb->next; \
-- skb != (struct sk_buff *)(queue); \
-- skb = tmp, tmp = skb->next)
--
--#define skb_queue_reverse_walk(queue, skb) \
-- for (skb = (queue)->prev; \
-- prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
-- skb = skb->prev)
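
Of these walkers, only skb_queue_walk_safe() may be used when the loop body unlinks packets, because it caches the successor before the body runs. A hedged sketch that purges a queue (is_stale() is a hypothetical predicate, and the caller is assumed to hold the queue lock, as __skb_unlink() requires):

/* Sketch: drop stale packets; the safe variant is needed because we unlink. */
static void example_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(list, skb, tmp) {
                if (is_stale(skb)) {            /* hypothetical */
                        __skb_unlink(skb, list);
                        kfree_skb(skb);
                }
        }
}
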
--
--
--extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
-- int *peeked, int *err);
--extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
-- int noblock, int *err);
--extern unsigned int datagram_poll(struct file *file, struct socket *sock,
-- struct poll_table_struct *wait);
--extern int skb_copy_datagram_iovec(const struct sk_buff *from,
-- int offset, struct iovec *to,
-- int size);
--extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
-- int hlen,
-- struct iovec *iov);
--extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
-- int offset,
-- struct iovec *from,
-- int len);
--extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
--extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
-- unsigned int flags);
--extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
-- int len, __wsum csum);
--extern int skb_copy_bits(const struct sk_buff *skb, int offset,
-- void *to, int len);
--extern int skb_store_bits(struct sk_buff *skb, int offset,
-- const void *from, int len);
--extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
-- int offset, u8 *to, int len,
-- __wsum csum);
--extern int skb_splice_bits(struct sk_buff *skb,
-- unsigned int offset,
-- struct pipe_inode_info *pipe,
-- unsigned int len,
-- unsigned int flags);
--extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
--extern void skb_split(struct sk_buff *skb,
-- struct sk_buff *skb1, const u32 len);
--
--extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
--
--static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
-- int len, void *buffer)
--{
-- int hlen = skb_headlen(skb);
--
-- if (hlen - offset >= len)
-- return skb->data + offset;
--
-- if (skb_copy_bits(skb, offset, buffer, len) < 0)
-- return NULL;
--
-- return buffer;
--}
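
skb_header_pointer() is the standard way to read a header that may be split across fragments: it returns a pointer straight into the linear area when possible and otherwise copies into the caller's buffer. A hedged sketch that peeks at a TCP header (thoff, the transport header offset, is assumed from the caller):

#include <linux/tcp.h>

/* Sketch: fetch the TCP header whether or not it is linear. */
static int example_peek_tcp(const struct sk_buff *skb, int thoff)
{
        struct tcphdr _tcph;
        const struct tcphdr *th;

        th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
        if (th == NULL)
                return -EINVAL;         /* packet shorter than a header */
        /* th aims into skb->data if linear, else at the _tcph copy */
        return th->syn && !th->ack;
}
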
--
--static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
-- void *to,
-- const unsigned int len)
--{
-- memcpy(to, skb->data, len);
--}
--
--static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
-- const int offset, void *to,
-- const unsigned int len)
--{
-- memcpy(to, skb->data + offset, len);
--}
--
--static inline void skb_copy_to_linear_data(struct sk_buff *skb,
-- const void *from,
-- const unsigned int len)
--{
-- memcpy(skb->data, from, len);
--}
--
--static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
-- const int offset,
-- const void *from,
-- const unsigned int len)
--{
-- memcpy(skb->data + offset, from, len);
--}
--
--extern void skb_init(void);
--
--/**
-- * skb_get_timestamp - get timestamp from a skb
-- * @skb: skb to get stamp from
-- * @stamp: pointer to struct timeval to store stamp in
-- *
-- * Timestamps are stored in the skb as offsets to a base timestamp.
-- * This function converts the offset back to a struct timeval and stores
-- * it in stamp.
-- */
--static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
--{
-- *stamp = ktime_to_timeval(skb->tstamp);
--}
--
--static inline void __net_timestamp(struct sk_buff *skb)
--{
-- skb->tstamp = ktime_get_real();
--}
--
--static inline ktime_t net_timedelta(ktime_t t)
--{
-- return ktime_sub(ktime_get_real(), t);
--}
--
--static inline ktime_t net_invalid_timestamp(void)
--{
-- return ktime_set(0, 0);
--}
--
--extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
--extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
--
--static inline int skb_csum_unnecessary(const struct sk_buff *skb)
--{
-- return skb->ip_summed & CHECKSUM_UNNECESSARY;
--}
--
--/**
-- * skb_checksum_complete - Calculate checksum of an entire packet
-- * @skb: packet to process
-- *
-- * This function calculates the checksum over the entire packet plus
-- * the value of skb->csum. The latter can be used to supply the
-- * checksum of a pseudo header as used by TCP/UDP. It returns the
-- * checksum.
-- *
-- * For protocols that contain complete checksums such as ICMP/TCP/UDP,
-- * this function can be used to verify the checksum on received
-- * packets. In that case the function should return zero if the
-- * checksum is correct. In particular, this function will return zero
-- * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
-- * hardware has already verified the correctness of the checksum.
-- */
--static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
--{
-- return skb_csum_unnecessary(skb) ?
-- 0 : __skb_checksum_complete(skb);
--}
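
In practice a receive path seeds skb->csum with the pseudo-header sum and then treats a zero return as success. A hedged sketch of that pattern (the seeding is assumed to have happened upstream):

/* Sketch: final checksum verdict on a received packet. */
static int example_rx_checksum(struct sk_buff *skb)
{
        if (skb_checksum_complete(skb)) {
                kfree_skb(skb);         /* bad checksum: drop */
                return -EINVAL;
        }
        return 0;       /* correct, or already verified by hardware */
}
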
--
--#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
--extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
--static inline void nf_conntrack_put(struct nf_conntrack *nfct)
--{
-- if (nfct && atomic_dec_and_test(&nfct->use))
-- nf_conntrack_destroy(nfct);
--}
--static inline void nf_conntrack_get(struct nf_conntrack *nfct)
--{
-- if (nfct)
-- atomic_inc(&nfct->use);
--}
--static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
--{
-- if (skb)
-- atomic_inc(&skb->users);
--}
--static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
--{
-- if (skb)
-- kfree_skb(skb);
--}
--#endif
--#ifdef CONFIG_BRIDGE_NETFILTER
--static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
--{
-- if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
-- kfree(nf_bridge);
--}
--static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
--{
-- if (nf_bridge)
-- atomic_inc(&nf_bridge->use);
--}
--#endif /* CONFIG_BRIDGE_NETFILTER */
--static inline void nf_reset(struct sk_buff *skb)
--{
--#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-- nf_conntrack_put(skb->nfct);
-- skb->nfct = NULL;
-- nf_conntrack_put_reasm(skb->nfct_reasm);
-- skb->nfct_reasm = NULL;
--#endif
--#ifdef CONFIG_BRIDGE_NETFILTER
-- nf_bridge_put(skb->nf_bridge);
-- skb->nf_bridge = NULL;
--#endif
--}
--
--/* Note: This doesn't put any conntrack and bridge info in dst. */
--static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
--{
--#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-- dst->nfct = src->nfct;
-- nf_conntrack_get(src->nfct);
-- dst->nfctinfo = src->nfctinfo;
-- dst->nfct_reasm = src->nfct_reasm;
-- nf_conntrack_get_reasm(src->nfct_reasm);
--#endif
--#ifdef CONFIG_BRIDGE_NETFILTER
-- dst->nf_bridge = src->nf_bridge;
-- nf_bridge_get(src->nf_bridge);
--#endif
--}
--
--static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
--{
--#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-- nf_conntrack_put(dst->nfct);
-- nf_conntrack_put_reasm(dst->nfct_reasm);
--#endif
--#ifdef CONFIG_BRIDGE_NETFILTER
-- nf_bridge_put(dst->nf_bridge);
--#endif
-- __nf_copy(dst, src);
--}
--
--#ifdef CONFIG_NETWORK_SECMARK
--static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
--{
-- to->secmark = from->secmark;
--}
--
--static inline void skb_init_secmark(struct sk_buff *skb)
--{
-- skb->secmark = 0;
--}
--#else
--static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
--{ }
--
--static inline void skb_init_secmark(struct sk_buff *skb)
--{ }
--#endif
--
--static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
--{
-- skb->queue_mapping = queue_mapping;
--}
--
--static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
--{
-- return skb->queue_mapping;
--}
--
--static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
--{
-- to->queue_mapping = from->queue_mapping;
--}
--
--static inline int skb_is_gso(const struct sk_buff *skb)
--{
-- return skb_shinfo(skb)->gso_size;
--}
--
--static inline int skb_is_gso_v6(const struct sk_buff *skb)
--{
-- return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
--}
--
--extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
--
--static inline bool skb_warn_if_lro(const struct sk_buff *skb)
--{
-- /* LRO sets gso_size but not gso_type, whereas if GSO is really
-- * wanted then gso_type will be set. */
-- struct skb_shared_info *shinfo = skb_shinfo(skb);
-- if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
-- __skb_warn_lro_forwarding(skb);
-- return true;
-- }
-- return false;
--}
--
--static inline void skb_forward_csum(struct sk_buff *skb)
--{
-- /* Unfortunately we don't support this one. Any brave souls? */
-- if (skb->ip_summed == CHECKSUM_COMPLETE)
-- skb->ip_summed = CHECKSUM_NONE;
--}
--
--bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
--#endif /* __KERNEL__ */
--#endif /* _LINUX_SKBUFF_H */
-diff -Nurb linux-2.6.27-720/include/linux/spinlock.h linux-2.6.27-710/include/linux/spinlock.h
---- linux-2.6.27-720/include/linux/spinlock.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/linux/spinlock.h 2008-10-09 18:13:53.000000000 -0400
-@@ -54,7 +54,6 @@
- #include <linux/kernel.h>
- #include <linux/stringify.h>
- #include <linux/bottom_half.h>
--#include <linux/types.h>
-
- #include <asm/system.h>
-
-diff -Nurb linux-2.6.27-720/include/linux/spinlock_types.h linux-2.6.27-710/include/linux/spinlock_types.h
---- linux-2.6.27-720/include/linux/spinlock_types.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/linux/spinlock_types.h 2008-10-09 18:13:53.000000000 -0400
-@@ -51,47 +51,37 @@
-
- #define SPINLOCK_OWNER_INIT ((void *)-1L)
-
--#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
--# define SPINLOCK_BREAK_LOCK_INIT 0,
--#else
--# define SPINLOCK_BREAK_LOCK_INIT
--#endif
--
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
--# define SPIN_DEP_MAP_INIT(lockname) { 0, 0, #lockname }
-+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
- #else
- # define SPIN_DEP_MAP_INIT(lockname)
- #endif
-
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
--# define RW_DEP_MAP_INIT(lockname) { 0, 0, #lockname }
-+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
- #else
- # define RW_DEP_MAP_INIT(lockname)
- #endif
-
- #ifdef CONFIG_DEBUG_SPINLOCK
- # define __SPIN_LOCK_UNLOCKED(lockname) \
-- (spinlock_t) { /*raw_lock*/ __RAW_SPIN_LOCK_UNLOCKED, \
-- /*break_lock*/ SPINLOCK_BREAK_LOCK_INIT \
-- /*magic*/ SPINLOCK_MAGIC, \
-- /*owner_cpu*/ -1, \
-- /*owner*/ SPINLOCK_OWNER_INIT, \
-+ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
-+ .magic = SPINLOCK_MAGIC, \
-+ .owner = SPINLOCK_OWNER_INIT, \
-+ .owner_cpu = -1, \
- SPIN_DEP_MAP_INIT(lockname) }
- #define __RW_LOCK_UNLOCKED(lockname) \
-- (rwlock_t) { /*raw_lock*/ __RAW_RW_LOCK_UNLOCKED, \
-- /*break_lock*/ SPINLOCK_BREAK_LOCK_INIT \
-- /*magic*/ RWLOCK_MAGIC, \
-- /*owner_cpu*/ -1, \
-- /*owner*/ SPINLOCK_OWNER_INIT, \
-+ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
-+ .magic = RWLOCK_MAGIC, \
-+ .owner = SPINLOCK_OWNER_INIT, \
-+ .owner_cpu = -1, \
- RW_DEP_MAP_INIT(lockname) }
- #else
- # define __SPIN_LOCK_UNLOCKED(lockname) \
-- (spinlock_t) { /*raw_lock*/ __RAW_SPIN_LOCK_UNLOCKED, \
-- /*break_lock*/ SPINLOCK_BREAK_LOCK_INIT \
-+ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
- SPIN_DEP_MAP_INIT(lockname) }
- #define __RW_LOCK_UNLOCKED(lockname) \
-- (rwlock_t) { /*raw_lock*/ __RAW_RW_LOCK_UNLOCKED, \
-- /*break_lock*/ SPINLOCK_BREAK_LOCK_INIT \
-+ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
- RW_DEP_MAP_INIT(lockname) }
- #endif
-
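For context: the -720 (Click) side of this hunk had rewritten the initializers positionally with /*field*/ comments, presumably because pre-C++20 C++ compilers reject C99 designated initializers; the hunk restores mainline's designated form, which stays correct even if the structure's field order changes. A hedged user-space illustration of the difference:

/* Illustration only: positional vs. designated initialization in C99. */
struct point { int x, y; };

struct point a = { 1, 2 };              /* positional: order-dependent */
struct point b = { .y = 2, .x = 1 };    /* designated: order-independent */
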
-diff -Nurb linux-2.6.27-720/include/linux/stddef.h linux-2.6.27-710/include/linux/stddef.h
---- linux-2.6.27-720/include/linux/stddef.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/linux/stddef.h 2008-10-09 18:13:53.000000000 -0400
-@@ -12,12 +12,10 @@
-
- #ifdef __KERNEL__
-
--#ifndef __cplusplus
- enum {
- false = 0,
- true = 1
- };
--#endif
-
- #undef offsetof
- #ifdef __compiler_offsetof
-diff -Nurb linux-2.6.27-720/include/linux/sysctl.h linux-2.6.27-710/include/linux/sysctl.h
---- linux-2.6.27-720/include/linux/sysctl.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/linux/sysctl.h 2009-05-04 12:15:30.000000000 -0400
-@@ -985,7 +985,7 @@
- void __user *oldval, size_t __user *oldlenp,
- void __user *newval, size_t newlen);
-
--typedef int proc_handler_t (struct ctl_table *ctl, int write, struct file * filp,
-+typedef int proc_handler (struct ctl_table *ctl, int write, struct file * filp,
- void __user *buffer, size_t *lenp, loff_t *ppos);
-
- extern int proc_dostring(struct ctl_table *, int, struct file *,
-@@ -1066,7 +1066,7 @@
- mode_t mode;
- struct ctl_table *child;
- struct ctl_table *parent; /* Automatically set */
-- proc_handler_t *proc_handler; /* Callback for text formatting */
-+ proc_handler *proc_handler; /* Callback for text formatting */
- ctl_handler *strategy; /* Callback function for all r/w */
- void *extra1;
- void *extra2;
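
Whichever typedef name is in force, a handler has the signature shown above and is invoked for reads and writes of the corresponding /proc/sys file. A hedged sketch of a handler that wraps the stock integer handler (the log message is illustrative):

/* Sketch: log successful writes while delegating the real work. */
static int my_handler(struct ctl_table *ctl, int write, struct file *filp,
                      void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);

        if (write && ret == 0)
                printk(KERN_INFO "sysctl %s updated\n", ctl->procname);
        return ret;
}
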
-diff -Nurb linux-2.6.27-720/include/linux/sysctl.h.orig linux-2.6.27-710/include/linux/sysctl.h.orig
---- linux-2.6.27-720/include/linux/sysctl.h.orig 2009-05-04 12:15:30.000000000 -0400
-+++ linux-2.6.27-710/include/linux/sysctl.h.orig 1969-12-31 19:00:00.000000000 -0500
-@@ -1,1120 +0,0 @@
--/*
-- * sysctl.h: General linux system control interface
-- *
-- * Begun 24 March 1995, Stephen Tweedie
-- *
-- ****************************************************************
-- ****************************************************************
-- **
-- ** WARNING:
-- ** The values in this file are exported to user space via
-- ** the sysctl() binary interface. Do *NOT* change the
-- ** numbering of any existing values here, and do not change
-- ** any numbers within any one set of values. If you have to
-- ** redefine an existing interface, use a new number for it.
-- ** The kernel will then return -ENOTDIR to any application using
-- ** the old binary interface.
-- **
-- ** For new interfaces unless you really need a binary number
-- ** please use CTL_UNNUMBERED.
-- **
-- ****************************************************************
-- ****************************************************************
-- */
--
--#ifndef _LINUX_SYSCTL_H
--#define _LINUX_SYSCTL_H
--
--#include <linux/kernel.h>
--#include <linux/types.h>
--#include <linux/compiler.h>
--
--struct file;
--struct completion;
--
--#define CTL_MAXNAME 10 /* how many path components do we allow in a
-- call to sysctl? In other words, what is
-- the largest acceptable value for the nlen
-- member of a struct __sysctl_args to have? */
--
--struct __sysctl_args {
-- int __user *name;
-- int nlen;
-- void __user *oldval;
-- size_t __user *oldlenp;
-- void __user *newval;
-- size_t newlen;
-- unsigned long __unused[4];
--};
--
--/* Define sysctl names first */
--
--/* Top-level names: */
--
--/* For internal pattern-matching use only: */
--#ifdef __KERNEL__
--#define CTL_NONE 0
--#define CTL_UNNUMBERED CTL_NONE /* sysctl without a binary number */
--#endif
--
--enum
--{
-- CTL_KERN=1, /* General kernel info and control */
-- CTL_VM=2, /* VM management */
-- CTL_NET=3, /* Networking */
-- CTL_PROC=4, /* removal breaks strace(1) compilation */
-- CTL_FS=5, /* Filesystems */
-- CTL_DEBUG=6, /* Debugging */
-- CTL_DEV=7, /* Devices */
-- CTL_BUS=8, /* Busses */
-- CTL_ABI=9, /* Binary emulation */
-- CTL_CPU=10, /* CPU stuff (speed scaling, etc) */
-- CTL_ARLAN=254, /* arlan wireless driver */
-- CTL_VSERVER=4242, /* Linux-VServer debug */
-- CTL_S390DBF=5677, /* s390 debug */
-- CTL_SUNRPC=7249, /* sunrpc debug */
-- CTL_PM=9899, /* frv power management */
-- CTL_FRV=9898, /* frv specific sysctls */
--};
--
--/* CTL_BUS names: */
--enum
--{
-- CTL_BUS_ISA=1 /* ISA */
--};
--
--/* /proc/sys/fs/inotify/ */
--enum
--{
-- INOTIFY_MAX_USER_INSTANCES=1, /* max instances per user */
-- INOTIFY_MAX_USER_WATCHES=2, /* max watches per user */
-- INOTIFY_MAX_QUEUED_EVENTS=3 /* max queued events per instance */
--};
--
--/* CTL_KERN names: */
--enum
--{
-- KERN_OSTYPE=1, /* string: system version */
-- KERN_OSRELEASE=2, /* string: system release */
-- KERN_OSREV=3, /* int: system revision */
-- KERN_VERSION=4, /* string: compile time info */
-- KERN_SECUREMASK=5, /* struct: maximum rights mask */
-- KERN_PROF=6, /* table: profiling information */
-- KERN_NODENAME=7,
-- KERN_DOMAINNAME=8,
--
-- KERN_PANIC=15, /* int: panic timeout */
-- KERN_REALROOTDEV=16, /* real root device to mount after initrd */
-- KERN_VSHELPER=17, /* string: path to vshelper policy agent */
--
-- KERN_SPARC_REBOOT=21, /* reboot command on Sparc */
-- KERN_CTLALTDEL=22, /* int: allow ctl-alt-del to reboot */
-- KERN_PRINTK=23, /* struct: control printk logging parameters */
-- KERN_NAMETRANS=24, /* Name translation */
-- KERN_PPC_HTABRECLAIM=25, /* turn htab reclamation on/off on PPC */
-- KERN_PPC_ZEROPAGED=26, /* turn idle page zeroing on/off on PPC */
-- KERN_PPC_POWERSAVE_NAP=27, /* use nap mode for power saving */
-- KERN_MODPROBE=28,
-- KERN_SG_BIG_BUFF=29,
-- KERN_ACCT=30, /* BSD process accounting parameters */
-- KERN_PPC_L2CR=31, /* l2cr register on PPC */
--
-- KERN_RTSIGNR=32, /* Number of rt sigs queued */
-- KERN_RTSIGMAX=33, /* Max queueable */
--
-- KERN_SHMMAX=34, /* long: Maximum shared memory segment */
-- KERN_MSGMAX=35, /* int: Maximum size of a message */
-- KERN_MSGMNB=36, /* int: Maximum message queue size */
-- KERN_MSGPOOL=37, /* int: Maximum system message pool size */
-- KERN_SYSRQ=38, /* int: Sysreq enable */
-- KERN_MAX_THREADS=39, /* int: Maximum nr of threads in the system */
-- KERN_RANDOM=40, /* Random driver */
-- KERN_SHMALL=41, /* int: Maximum size of shared memory */
-- KERN_MSGMNI=42, /* int: msg queue identifiers */
-- KERN_SEM=43, /* struct: sysv semaphore limits */
-- KERN_SPARC_STOP_A=44, /* int: Sparc Stop-A enable */
-- KERN_SHMMNI=45, /* int: shm array identifiers */
-- KERN_OVERFLOWUID=46, /* int: overflow UID */
-- KERN_OVERFLOWGID=47, /* int: overflow GID */
-- KERN_SHMPATH=48, /* string: path to shm fs */
-- KERN_HOTPLUG=49, /* string: path to uevent helper (deprecated) */
-- KERN_IEEE_EMULATION_WARNINGS=50, /* int: unimplemented ieee instructions */
-- KERN_S390_USER_DEBUG_LOGGING=51, /* int: dumps of user faults */
-- KERN_CORE_USES_PID=52, /* int: use core or core.%pid */
-- KERN_TAINTED=53, /* int: various kernel tainted flags */
-- KERN_CADPID=54, /* int: PID of the process to notify on CAD */
-- KERN_PIDMAX=55, /* int: PID # limit */
-- KERN_CORE_PATTERN=56, /* string: pattern for core-file names */
-- KERN_PANIC_ON_OOPS=57, /* int: whether we will panic on an oops */
-- KERN_HPPA_PWRSW=58, /* int: hppa soft-power enable */
-- KERN_HPPA_UNALIGNED=59, /* int: hppa unaligned-trap enable */
-- KERN_PRINTK_RATELIMIT=60, /* int: tune printk ratelimiting */
-- KERN_PRINTK_RATELIMIT_BURST=61, /* int: tune printk ratelimiting */
-- KERN_PTY=62, /* dir: pty driver */
-- KERN_NGROUPS_MAX=63, /* int: NGROUPS_MAX */
-- KERN_SPARC_SCONS_PWROFF=64, /* int: serial console power-off halt */
-- KERN_HZ_TIMER=65, /* int: hz timer on or off */
-- KERN_UNKNOWN_NMI_PANIC=66, /* int: unknown nmi panic flag */
-- KERN_BOOTLOADER_TYPE=67, /* int: boot loader type */
-- KERN_RANDOMIZE=68, /* int: randomize virtual address space */
-- KERN_SETUID_DUMPABLE=69, /* int: behaviour of dumps for setuid core */
-- KERN_SPIN_RETRY=70, /* int: number of spinlock retries */
-- KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */
-- KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */
-- KERN_COMPAT_LOG=73, /* int: print compat layer messages */
-- KERN_MAX_LOCK_DEPTH=74,
-- KERN_NMI_WATCHDOG=75, /* int: enable/disable nmi watchdog */
-- KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered NMI */
--};
--
--
--
--/* CTL_VM names: */
--enum
--{
-- VM_UNUSED1=1, /* was: struct: Set vm swapping control */
-- VM_UNUSED2=2, /* was: int: Linear or sqrt() swapout for hogs */
-- VM_UNUSED3=3, /* was: struct: Set free page thresholds */
-- VM_UNUSED4=4, /* Spare */
-- VM_OVERCOMMIT_MEMORY=5, /* Turn off the virtual memory safety limit */
-- VM_UNUSED5=6, /* was: struct: Set buffer memory thresholds */
-- VM_UNUSED7=7, /* was: struct: Set cache memory thresholds */
-- VM_UNUSED8=8, /* was: struct: Control kswapd behaviour */
-- VM_UNUSED9=9, /* was: struct: Set page table cache parameters */
-- VM_PAGE_CLUSTER=10, /* int: set number of pages to swap together */
-- VM_DIRTY_BACKGROUND=11, /* dirty_background_ratio */
-- VM_DIRTY_RATIO=12, /* dirty_ratio */
-- VM_DIRTY_WB_CS=13, /* dirty_writeback_centisecs */
-- VM_DIRTY_EXPIRE_CS=14, /* dirty_expire_centisecs */
-- VM_NR_PDFLUSH_THREADS=15, /* nr_pdflush_threads */
-- VM_OVERCOMMIT_RATIO=16, /* percent of RAM to allow overcommit in */
-- VM_PAGEBUF=17, /* struct: Control pagebuf parameters */
-- VM_HUGETLB_PAGES=18, /* int: Number of available Huge Pages */
-- VM_SWAPPINESS=19, /* Tendency to steal mapped memory */
-- VM_LOWMEM_RESERVE_RATIO=20,/* reservation ratio for lower memory zones */
-- VM_MIN_FREE_KBYTES=21, /* Minimum free kilobytes to maintain */
-- VM_MAX_MAP_COUNT=22, /* int: Maximum number of mmaps/address-space */
-- VM_LAPTOP_MODE=23, /* vm laptop mode */
-- VM_BLOCK_DUMP=24, /* block dump mode */
-- VM_HUGETLB_GROUP=25, /* permitted hugetlb group */
-- VM_VFS_CACHE_PRESSURE=26, /* dcache/icache reclaim pressure */
-- VM_LEGACY_VA_LAYOUT=27, /* legacy/compatibility virtual address space layout */
-- VM_SWAP_TOKEN_TIMEOUT=28, /* default time for token time out */
-- VM_DROP_PAGECACHE=29, /* int: nuke lots of pagecache */
-- VM_PERCPU_PAGELIST_FRACTION=30,/* int: fraction of pages in each percpu_pagelist */
-- VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */
-- VM_MIN_UNMAPPED=32, /* Set min percent of unmapped pages */
-- VM_PANIC_ON_OOM=33, /* panic at out-of-memory */
-- VM_VDSO_ENABLED=34, /* map VDSO into new processes? */
-- VM_MIN_SLAB=35, /* Percent pages ignored by zone reclaim */
--};
--
--
--/* CTL_NET names: */
--enum
--{
-- NET_CORE=1,
-- NET_ETHER=2,
-- NET_802=3,
-- NET_UNIX=4,
-- NET_IPV4=5,
-- NET_IPX=6,
-- NET_ATALK=7,
-- NET_NETROM=8,
-- NET_AX25=9,
-- NET_BRIDGE=10,
-- NET_ROSE=11,
-- NET_IPV6=12,
-- NET_X25=13,
-- NET_TR=14,
-- NET_DECNET=15,
-- NET_ECONET=16,
-- NET_SCTP=17,
-- NET_LLC=18,
-- NET_NETFILTER=19,
-- NET_DCCP=20,
-- NET_IRDA=412,
--};
--
--/* /proc/sys/kernel/random */
--enum
--{
-- RANDOM_POOLSIZE=1,
-- RANDOM_ENTROPY_COUNT=2,
-- RANDOM_READ_THRESH=3,
-- RANDOM_WRITE_THRESH=4,
-- RANDOM_BOOT_ID=5,
-- RANDOM_UUID=6
--};
--
--/* /proc/sys/kernel/pty */
--enum
--{
-- PTY_MAX=1,
-- PTY_NR=2
--};
--
--/* /proc/sys/bus/isa */
--enum
--{
-- BUS_ISA_MEM_BASE=1,
-- BUS_ISA_PORT_BASE=2,
-- BUS_ISA_PORT_SHIFT=3
--};
--
--/* /proc/sys/net/core */
--enum
--{
-- NET_CORE_WMEM_MAX=1,
-- NET_CORE_RMEM_MAX=2,
-- NET_CORE_WMEM_DEFAULT=3,
-- NET_CORE_RMEM_DEFAULT=4,
--/* was NET_CORE_DESTROY_DELAY */
-- NET_CORE_MAX_BACKLOG=6,
-- NET_CORE_FASTROUTE=7,
-- NET_CORE_MSG_COST=8,
-- NET_CORE_MSG_BURST=9,
-- NET_CORE_OPTMEM_MAX=10,
-- NET_CORE_HOT_LIST_LENGTH=11,
-- NET_CORE_DIVERT_VERSION=12,
-- NET_CORE_NO_CONG_THRESH=13,
-- NET_CORE_NO_CONG=14,
-- NET_CORE_LO_CONG=15,
-- NET_CORE_MOD_CONG=16,
-- NET_CORE_DEV_WEIGHT=17,
-- NET_CORE_SOMAXCONN=18,
-- NET_CORE_BUDGET=19,
-- NET_CORE_AEVENT_ETIME=20,
-- NET_CORE_AEVENT_RSEQTH=21,
-- NET_CORE_WARNINGS=22,
--};
--
--/* /proc/sys/net/ethernet */
--
--/* /proc/sys/net/802 */
--
--/* /proc/sys/net/unix */
--
--enum
--{
-- NET_UNIX_DESTROY_DELAY=1,
-- NET_UNIX_DELETE_DELAY=2,
-- NET_UNIX_MAX_DGRAM_QLEN=3,
--};
--
--/* /proc/sys/net/netfilter */
--enum
--{
-- NET_NF_CONNTRACK_MAX=1,
-- NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
-- NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
-- NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
-- NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
-- NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
-- NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
-- NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
-- NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
-- NET_NF_CONNTRACK_UDP_TIMEOUT=10,
-- NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
-- NET_NF_CONNTRACK_ICMP_TIMEOUT=12,
-- NET_NF_CONNTRACK_GENERIC_TIMEOUT=13,
-- NET_NF_CONNTRACK_BUCKETS=14,
-- NET_NF_CONNTRACK_LOG_INVALID=15,
-- NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16,
-- NET_NF_CONNTRACK_TCP_LOOSE=17,
-- NET_NF_CONNTRACK_TCP_BE_LIBERAL=18,
-- NET_NF_CONNTRACK_TCP_MAX_RETRANS=19,
-- NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20,
-- NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21,
-- NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22,
-- NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23,
-- NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24,
-- NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
-- NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
-- NET_NF_CONNTRACK_COUNT=27,
-- NET_NF_CONNTRACK_ICMPV6_TIMEOUT=28,
-- NET_NF_CONNTRACK_FRAG6_TIMEOUT=29,
-- NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30,
-- NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31,
-- NET_NF_CONNTRACK_CHECKSUM=32,
--};
--
--/* /proc/sys/net/ipv4 */
--enum
--{
-- /* v2.0 compatible variables */
-- NET_IPV4_FORWARD=8,
-- NET_IPV4_DYNADDR=9,
--
-- NET_IPV4_CONF=16,
-- NET_IPV4_NEIGH=17,
-- NET_IPV4_ROUTE=18,
-- NET_IPV4_FIB_HASH=19,
-- NET_IPV4_NETFILTER=20,
--
-- NET_IPV4_TCP_TIMESTAMPS=33,
-- NET_IPV4_TCP_WINDOW_SCALING=34,
-- NET_IPV4_TCP_SACK=35,
-- NET_IPV4_TCP_RETRANS_COLLAPSE=36,
-- NET_IPV4_DEFAULT_TTL=37,
-- NET_IPV4_AUTOCONFIG=38,
-- NET_IPV4_NO_PMTU_DISC=39,
-- NET_IPV4_TCP_SYN_RETRIES=40,
-- NET_IPV4_IPFRAG_HIGH_THRESH=41,
-- NET_IPV4_IPFRAG_LOW_THRESH=42,
-- NET_IPV4_IPFRAG_TIME=43,
-- NET_IPV4_TCP_MAX_KA_PROBES=44,
-- NET_IPV4_TCP_KEEPALIVE_TIME=45,
-- NET_IPV4_TCP_KEEPALIVE_PROBES=46,
-- NET_IPV4_TCP_RETRIES1=47,
-- NET_IPV4_TCP_RETRIES2=48,
-- NET_IPV4_TCP_FIN_TIMEOUT=49,
-- NET_IPV4_IP_MASQ_DEBUG=50,
-- NET_TCP_SYNCOOKIES=51,
-- NET_TCP_STDURG=52,
-- NET_TCP_RFC1337=53,
-- NET_TCP_SYN_TAILDROP=54,
-- NET_TCP_MAX_SYN_BACKLOG=55,
-- NET_IPV4_LOCAL_PORT_RANGE=56,
-- NET_IPV4_ICMP_ECHO_IGNORE_ALL=57,
-- NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS=58,
-- NET_IPV4_ICMP_SOURCEQUENCH_RATE=59,
-- NET_IPV4_ICMP_DESTUNREACH_RATE=60,
-- NET_IPV4_ICMP_TIMEEXCEED_RATE=61,
-- NET_IPV4_ICMP_PARAMPROB_RATE=62,
-- NET_IPV4_ICMP_ECHOREPLY_RATE=63,
-- NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES=64,
-- NET_IPV4_IGMP_MAX_MEMBERSHIPS=65,
-- NET_TCP_TW_RECYCLE=66,
-- NET_IPV4_ALWAYS_DEFRAG=67,
-- NET_IPV4_TCP_KEEPALIVE_INTVL=68,
-- NET_IPV4_INET_PEER_THRESHOLD=69,
-- NET_IPV4_INET_PEER_MINTTL=70,
-- NET_IPV4_INET_PEER_MAXTTL=71,
-- NET_IPV4_INET_PEER_GC_MINTIME=72,
-- NET_IPV4_INET_PEER_GC_MAXTIME=73,
-- NET_TCP_ORPHAN_RETRIES=74,
-- NET_TCP_ABORT_ON_OVERFLOW=75,
-- NET_TCP_SYNACK_RETRIES=76,
-- NET_TCP_MAX_ORPHANS=77,
-- NET_TCP_MAX_TW_BUCKETS=78,
-- NET_TCP_FACK=79,
-- NET_TCP_REORDERING=80,
-- NET_TCP_ECN=81,
-- NET_TCP_DSACK=82,
-- NET_TCP_MEM=83,
-- NET_TCP_WMEM=84,
-- NET_TCP_RMEM=85,
-- NET_TCP_APP_WIN=86,
-- NET_TCP_ADV_WIN_SCALE=87,
-- NET_IPV4_NONLOCAL_BIND=88,
-- NET_IPV4_ICMP_RATELIMIT=89,
-- NET_IPV4_ICMP_RATEMASK=90,
-- NET_TCP_TW_REUSE=91,
-- NET_TCP_FRTO=92,
-- NET_TCP_LOW_LATENCY=93,
-- NET_IPV4_IPFRAG_SECRET_INTERVAL=94,
-- NET_IPV4_IGMP_MAX_MSF=96,
-- NET_TCP_NO_METRICS_SAVE=97,
-- NET_TCP_DEFAULT_WIN_SCALE=105,
-- NET_TCP_MODERATE_RCVBUF=106,
-- NET_TCP_TSO_WIN_DIVISOR=107,
-- NET_TCP_BIC_BETA=108,
-- NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR=109,
-- NET_TCP_CONG_CONTROL=110,
-- NET_TCP_ABC=111,
-- NET_IPV4_IPFRAG_MAX_DIST=112,
-- NET_TCP_MTU_PROBING=113,
-- NET_TCP_BASE_MSS=114,
-- NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115,
-- NET_TCP_DMA_COPYBREAK=116,
-- NET_TCP_SLOW_START_AFTER_IDLE=117,
-- NET_CIPSOV4_CACHE_ENABLE=118,
-- NET_CIPSOV4_CACHE_BUCKET_SIZE=119,
-- NET_CIPSOV4_RBM_OPTFMT=120,
-- NET_CIPSOV4_RBM_STRICTVALID=121,
-- NET_TCP_AVAIL_CONG_CONTROL=122,
-- NET_TCP_ALLOWED_CONG_CONTROL=123,
-- NET_TCP_MAX_SSTHRESH=124,
-- NET_TCP_FRTO_RESPONSE=125,
--#ifdef CONFIG_ICMP_IPOD
-- NET_IPV4_ICMP_IPOD_VERSION,
-- NET_IPV4_ICMP_IPOD_ENABLED,
-- NET_IPV4_ICMP_IPOD_HOST,
-- NET_IPV4_ICMP_IPOD_MASK,
-- NET_IPV4_ICMP_IPOD_KEY
--#endif
--};
--
--enum {
-- NET_IPV4_ROUTE_FLUSH=1,
-- NET_IPV4_ROUTE_MIN_DELAY=2, /* obsolete since 2.6.25 */
-- NET_IPV4_ROUTE_MAX_DELAY=3, /* obsolete since 2.6.25 */
-- NET_IPV4_ROUTE_GC_THRESH=4,
-- NET_IPV4_ROUTE_MAX_SIZE=5,
-- NET_IPV4_ROUTE_GC_MIN_INTERVAL=6,
-- NET_IPV4_ROUTE_GC_TIMEOUT=7,
-- NET_IPV4_ROUTE_GC_INTERVAL=8,
-- NET_IPV4_ROUTE_REDIRECT_LOAD=9,
-- NET_IPV4_ROUTE_REDIRECT_NUMBER=10,
-- NET_IPV4_ROUTE_REDIRECT_SILENCE=11,
-- NET_IPV4_ROUTE_ERROR_COST=12,
-- NET_IPV4_ROUTE_ERROR_BURST=13,
-- NET_IPV4_ROUTE_GC_ELASTICITY=14,
-- NET_IPV4_ROUTE_MTU_EXPIRES=15,
-- NET_IPV4_ROUTE_MIN_PMTU=16,
-- NET_IPV4_ROUTE_MIN_ADVMSS=17,
-- NET_IPV4_ROUTE_SECRET_INTERVAL=18,
-- NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS=19,
--};
--
--enum
--{
-- NET_PROTO_CONF_ALL=-2,
-- NET_PROTO_CONF_DEFAULT=-3
--
-- /* And device ifindices ... */
--};
--
--enum
--{
-- NET_IPV4_CONF_FORWARDING=1,
-- NET_IPV4_CONF_MC_FORWARDING=2,
-- NET_IPV4_CONF_PROXY_ARP=3,
-- NET_IPV4_CONF_ACCEPT_REDIRECTS=4,
-- NET_IPV4_CONF_SECURE_REDIRECTS=5,
-- NET_IPV4_CONF_SEND_REDIRECTS=6,
-- NET_IPV4_CONF_SHARED_MEDIA=7,
-- NET_IPV4_CONF_RP_FILTER=8,
-- NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE=9,
-- NET_IPV4_CONF_BOOTP_RELAY=10,
-- NET_IPV4_CONF_LOG_MARTIANS=11,
-- NET_IPV4_CONF_TAG=12,
-- NET_IPV4_CONF_ARPFILTER=13,
-- NET_IPV4_CONF_MEDIUM_ID=14,
-- NET_IPV4_CONF_NOXFRM=15,
-- NET_IPV4_CONF_NOPOLICY=16,
-- NET_IPV4_CONF_FORCE_IGMP_VERSION=17,
-- NET_IPV4_CONF_ARP_ANNOUNCE=18,
-- NET_IPV4_CONF_ARP_IGNORE=19,
-- NET_IPV4_CONF_PROMOTE_SECONDARIES=20,
-- NET_IPV4_CONF_ARP_ACCEPT=21,
-- __NET_IPV4_CONF_MAX
--};
--
--/* /proc/sys/net/ipv4/netfilter */
--enum
--{
-- NET_IPV4_NF_CONNTRACK_MAX=1,
-- NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
-- NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
-- NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
-- NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
-- NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
-- NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
-- NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
-- NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
-- NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT=10,
-- NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
-- NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT=12,
-- NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT=13,
-- NET_IPV4_NF_CONNTRACK_BUCKETS=14,
-- NET_IPV4_NF_CONNTRACK_LOG_INVALID=15,
-- NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16,
-- NET_IPV4_NF_CONNTRACK_TCP_LOOSE=17,
-- NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL=18,
-- NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS=19,
-- NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20,
-- NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21,
-- NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22,
-- NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23,
-- NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24,
-- NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
-- NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
-- NET_IPV4_NF_CONNTRACK_COUNT=27,
-- NET_IPV4_NF_CONNTRACK_CHECKSUM=28,
--};
--
--/* /proc/sys/net/ipv6 */
--enum {
-- NET_IPV6_CONF=16,
-- NET_IPV6_NEIGH=17,
-- NET_IPV6_ROUTE=18,
-- NET_IPV6_ICMP=19,
-- NET_IPV6_BINDV6ONLY=20,
-- NET_IPV6_IP6FRAG_HIGH_THRESH=21,
-- NET_IPV6_IP6FRAG_LOW_THRESH=22,
-- NET_IPV6_IP6FRAG_TIME=23,
-- NET_IPV6_IP6FRAG_SECRET_INTERVAL=24,
-- NET_IPV6_MLD_MAX_MSF=25,
--};
--
--enum {
-- NET_IPV6_ROUTE_FLUSH=1,
-- NET_IPV6_ROUTE_GC_THRESH=2,
-- NET_IPV6_ROUTE_MAX_SIZE=3,
-- NET_IPV6_ROUTE_GC_MIN_INTERVAL=4,
-- NET_IPV6_ROUTE_GC_TIMEOUT=5,
-- NET_IPV6_ROUTE_GC_INTERVAL=6,
-- NET_IPV6_ROUTE_GC_ELASTICITY=7,
-- NET_IPV6_ROUTE_MTU_EXPIRES=8,
-- NET_IPV6_ROUTE_MIN_ADVMSS=9,
-- NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS=10
--};
--
--enum {
-- NET_IPV6_FORWARDING=1,
-- NET_IPV6_HOP_LIMIT=2,
-- NET_IPV6_MTU=3,
-- NET_IPV6_ACCEPT_RA=4,
-- NET_IPV6_ACCEPT_REDIRECTS=5,
-- NET_IPV6_AUTOCONF=6,
-- NET_IPV6_DAD_TRANSMITS=7,
-- NET_IPV6_RTR_SOLICITS=8,
-- NET_IPV6_RTR_SOLICIT_INTERVAL=9,
-- NET_IPV6_RTR_SOLICIT_DELAY=10,
-- NET_IPV6_USE_TEMPADDR=11,
-- NET_IPV6_TEMP_VALID_LFT=12,
-- NET_IPV6_TEMP_PREFERED_LFT=13,
-- NET_IPV6_REGEN_MAX_RETRY=14,
-- NET_IPV6_MAX_DESYNC_FACTOR=15,
-- NET_IPV6_MAX_ADDRESSES=16,
-- NET_IPV6_FORCE_MLD_VERSION=17,
-- NET_IPV6_ACCEPT_RA_DEFRTR=18,
-- NET_IPV6_ACCEPT_RA_PINFO=19,
-- NET_IPV6_ACCEPT_RA_RTR_PREF=20,
-- NET_IPV6_RTR_PROBE_INTERVAL=21,
-- NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22,
-- NET_IPV6_PROXY_NDP=23,
-- NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
-- __NET_IPV6_MAX
--};
--
--/* /proc/sys/net/ipv6/icmp */
--enum {
-- NET_IPV6_ICMP_RATELIMIT=1
--};
--
--/* /proc/sys/net/<protocol>/neigh/<dev> */
--enum {
-- NET_NEIGH_MCAST_SOLICIT=1,
-- NET_NEIGH_UCAST_SOLICIT=2,
-- NET_NEIGH_APP_SOLICIT=3,
-- NET_NEIGH_RETRANS_TIME=4,
-- NET_NEIGH_REACHABLE_TIME=5,
-- NET_NEIGH_DELAY_PROBE_TIME=6,
-- NET_NEIGH_GC_STALE_TIME=7,
-- NET_NEIGH_UNRES_QLEN=8,
-- NET_NEIGH_PROXY_QLEN=9,
-- NET_NEIGH_ANYCAST_DELAY=10,
-- NET_NEIGH_PROXY_DELAY=11,
-- NET_NEIGH_LOCKTIME=12,
-- NET_NEIGH_GC_INTERVAL=13,
-- NET_NEIGH_GC_THRESH1=14,
-- NET_NEIGH_GC_THRESH2=15,
-- NET_NEIGH_GC_THRESH3=16,
-- NET_NEIGH_RETRANS_TIME_MS=17,
-- NET_NEIGH_REACHABLE_TIME_MS=18,
-- __NET_NEIGH_MAX
--};
--
--/* /proc/sys/net/dccp */
--enum {
-- NET_DCCP_DEFAULT=1,
--};
--
--/* /proc/sys/net/ipx */
--enum {
-- NET_IPX_PPROP_BROADCASTING=1,
-- NET_IPX_FORWARDING=2
--};
--
--/* /proc/sys/net/llc */
--enum {
-- NET_LLC2=1,
-- NET_LLC_STATION=2,
--};
--
--/* /proc/sys/net/llc/llc2 */
--enum {
-- NET_LLC2_TIMEOUT=1,
--};
--
--/* /proc/sys/net/llc/station */
--enum {
-- NET_LLC_STATION_ACK_TIMEOUT=1,
--};
--
--/* /proc/sys/net/llc/llc2/timeout */
--enum {
-- NET_LLC2_ACK_TIMEOUT=1,
-- NET_LLC2_P_TIMEOUT=2,
-- NET_LLC2_REJ_TIMEOUT=3,
-- NET_LLC2_BUSY_TIMEOUT=4,
--};
--
--/* /proc/sys/net/appletalk */
--enum {
-- NET_ATALK_AARP_EXPIRY_TIME=1,
-- NET_ATALK_AARP_TICK_TIME=2,
-- NET_ATALK_AARP_RETRANSMIT_LIMIT=3,
-- NET_ATALK_AARP_RESOLVE_TIME=4
--};
--
--
--/* /proc/sys/net/netrom */
--enum {
-- NET_NETROM_DEFAULT_PATH_QUALITY=1,
-- NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER=2,
-- NET_NETROM_NETWORK_TTL_INITIALISER=3,
-- NET_NETROM_TRANSPORT_TIMEOUT=4,
-- NET_NETROM_TRANSPORT_MAXIMUM_TRIES=5,
-- NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY=6,
-- NET_NETROM_TRANSPORT_BUSY_DELAY=7,
-- NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE=8,
-- NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT=9,
-- NET_NETROM_ROUTING_CONTROL=10,
-- NET_NETROM_LINK_FAILS_COUNT=11,
-- NET_NETROM_RESET=12
--};
--
--/* /proc/sys/net/ax25 */
--enum {
-- NET_AX25_IP_DEFAULT_MODE=1,
-- NET_AX25_DEFAULT_MODE=2,
-- NET_AX25_BACKOFF_TYPE=3,
-- NET_AX25_CONNECT_MODE=4,
-- NET_AX25_STANDARD_WINDOW=5,
-- NET_AX25_EXTENDED_WINDOW=6,
-- NET_AX25_T1_TIMEOUT=7,
-- NET_AX25_T2_TIMEOUT=8,
-- NET_AX25_T3_TIMEOUT=9,
-- NET_AX25_IDLE_TIMEOUT=10,
-- NET_AX25_N2=11,
-- NET_AX25_PACLEN=12,
-- NET_AX25_PROTOCOL=13,
-- NET_AX25_DAMA_SLAVE_TIMEOUT=14
--};
--
--/* /proc/sys/net/rose */
--enum {
-- NET_ROSE_RESTART_REQUEST_TIMEOUT=1,
-- NET_ROSE_CALL_REQUEST_TIMEOUT=2,
-- NET_ROSE_RESET_REQUEST_TIMEOUT=3,
-- NET_ROSE_CLEAR_REQUEST_TIMEOUT=4,
-- NET_ROSE_ACK_HOLD_BACK_TIMEOUT=5,
-- NET_ROSE_ROUTING_CONTROL=6,
-- NET_ROSE_LINK_FAIL_TIMEOUT=7,
-- NET_ROSE_MAX_VCS=8,
-- NET_ROSE_WINDOW_SIZE=9,
-- NET_ROSE_NO_ACTIVITY_TIMEOUT=10
--};
--
--/* /proc/sys/net/x25 */
--enum {
-- NET_X25_RESTART_REQUEST_TIMEOUT=1,
-- NET_X25_CALL_REQUEST_TIMEOUT=2,
-- NET_X25_RESET_REQUEST_TIMEOUT=3,
-- NET_X25_CLEAR_REQUEST_TIMEOUT=4,
-- NET_X25_ACK_HOLD_BACK_TIMEOUT=5,
-- NET_X25_FORWARD=6
--};
--
--/* /proc/sys/net/token-ring */
--enum
--{
-- NET_TR_RIF_TIMEOUT=1
--};
--
--/* /proc/sys/net/decnet/ */
--enum {
-- NET_DECNET_NODE_TYPE = 1,
-- NET_DECNET_NODE_ADDRESS = 2,
-- NET_DECNET_NODE_NAME = 3,
-- NET_DECNET_DEFAULT_DEVICE = 4,
-- NET_DECNET_TIME_WAIT = 5,
-- NET_DECNET_DN_COUNT = 6,
-- NET_DECNET_DI_COUNT = 7,
-- NET_DECNET_DR_COUNT = 8,
-- NET_DECNET_DST_GC_INTERVAL = 9,
-- NET_DECNET_CONF = 10,
-- NET_DECNET_NO_FC_MAX_CWND = 11,
-- NET_DECNET_MEM = 12,
-- NET_DECNET_RMEM = 13,
-- NET_DECNET_WMEM = 14,
-- NET_DECNET_DEBUG_LEVEL = 255
--};
--
--/* /proc/sys/net/decnet/conf/<dev> */
--enum {
-- NET_DECNET_CONF_LOOPBACK = -2,
-- NET_DECNET_CONF_DDCMP = -3,
-- NET_DECNET_CONF_PPP = -4,
-- NET_DECNET_CONF_X25 = -5,
-- NET_DECNET_CONF_GRE = -6,
-- NET_DECNET_CONF_ETHER = -7
--
-- /* ... and ifindex of devices */
--};
--
--/* /proc/sys/net/decnet/conf/<dev>/ */
--enum {
-- NET_DECNET_CONF_DEV_PRIORITY = 1,
-- NET_DECNET_CONF_DEV_T1 = 2,
-- NET_DECNET_CONF_DEV_T2 = 3,
-- NET_DECNET_CONF_DEV_T3 = 4,
-- NET_DECNET_CONF_DEV_FORWARDING = 5,
-- NET_DECNET_CONF_DEV_BLKSIZE = 6,
-- NET_DECNET_CONF_DEV_STATE = 7
--};
--
--/* /proc/sys/net/sctp */
--enum {
-- NET_SCTP_RTO_INITIAL = 1,
-- NET_SCTP_RTO_MIN = 2,
-- NET_SCTP_RTO_MAX = 3,
-- NET_SCTP_RTO_ALPHA = 4,
-- NET_SCTP_RTO_BETA = 5,
-- NET_SCTP_VALID_COOKIE_LIFE = 6,
-- NET_SCTP_ASSOCIATION_MAX_RETRANS = 7,
-- NET_SCTP_PATH_MAX_RETRANS = 8,
-- NET_SCTP_MAX_INIT_RETRANSMITS = 9,
-- NET_SCTP_HB_INTERVAL = 10,
-- NET_SCTP_PRESERVE_ENABLE = 11,
-- NET_SCTP_MAX_BURST = 12,
-- NET_SCTP_ADDIP_ENABLE = 13,
-- NET_SCTP_PRSCTP_ENABLE = 14,
-- NET_SCTP_SNDBUF_POLICY = 15,
-- NET_SCTP_SACK_TIMEOUT = 16,
-- NET_SCTP_RCVBUF_POLICY = 17,
--};
--
--/* /proc/sys/net/bridge */
--enum {
-- NET_BRIDGE_NF_CALL_ARPTABLES = 1,
-- NET_BRIDGE_NF_CALL_IPTABLES = 2,
-- NET_BRIDGE_NF_CALL_IP6TABLES = 3,
-- NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4,
-- NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5,
--};
--
--/* proc/sys/net/irda */
--enum {
-- NET_IRDA_DISCOVERY=1,
-- NET_IRDA_DEVNAME=2,
-- NET_IRDA_DEBUG=3,
-- NET_IRDA_FAST_POLL=4,
-- NET_IRDA_DISCOVERY_SLOTS=5,
-- NET_IRDA_DISCOVERY_TIMEOUT=6,
-- NET_IRDA_SLOT_TIMEOUT=7,
-- NET_IRDA_MAX_BAUD_RATE=8,
-- NET_IRDA_MIN_TX_TURN_TIME=9,
-- NET_IRDA_MAX_TX_DATA_SIZE=10,
-- NET_IRDA_MAX_TX_WINDOW=11,
-- NET_IRDA_MAX_NOREPLY_TIME=12,
-- NET_IRDA_WARN_NOREPLY_TIME=13,
-- NET_IRDA_LAP_KEEPALIVE_TIME=14,
--};
--
--
--/* CTL_FS names: */
--enum
--{
-- FS_NRINODE=1, /* int:current number of allocated inodes */
-- FS_STATINODE=2,
-- FS_MAXINODE=3, /* int:maximum number of inodes that can be allocated */
-- FS_NRDQUOT=4, /* int:current number of allocated dquots */
-- FS_MAXDQUOT=5, /* int:maximum number of dquots that can be allocated */
-- FS_NRFILE=6, /* int:current number of allocated filedescriptors */
-- FS_MAXFILE=7, /* int:maximum number of filedescriptors that can be allocated */
-- FS_DENTRY=8,
-- FS_NRSUPER=9, /* int:current number of allocated super_blocks */
-- FS_MAXSUPER=10, /* int:maximum number of super_blocks that can be allocated */
-- FS_OVERFLOWUID=11, /* int: overflow UID */
-- FS_OVERFLOWGID=12, /* int: overflow GID */
-- FS_LEASES=13, /* int: leases enabled */
-- FS_DIR_NOTIFY=14, /* int: directory notification enabled */
-- FS_LEASE_TIME=15, /* int: maximum time to wait for a lease break */
-- FS_DQSTATS=16, /* disc quota usage statistics and control */
-- FS_XFS=17, /* struct: control xfs parameters */
-- FS_AIO_NR=18, /* current system-wide number of aio requests */
-- FS_AIO_MAX_NR=19, /* system-wide maximum number of aio requests */
-- FS_INOTIFY=20, /* inotify submenu */
-- FS_OCFS2=988, /* ocfs2 */
--};
--
--/* /proc/sys/fs/quota/ */
--enum {
-- FS_DQ_LOOKUPS = 1,
-- FS_DQ_DROPS = 2,
-- FS_DQ_READS = 3,
-- FS_DQ_WRITES = 4,
-- FS_DQ_CACHE_HITS = 5,
-- FS_DQ_ALLOCATED = 6,
-- FS_DQ_FREE = 7,
-- FS_DQ_SYNCS = 8,
-- FS_DQ_WARNINGS = 9,
--};
--
--/* CTL_DEBUG names: */
--
--/* CTL_DEV names: */
--enum {
-- DEV_CDROM=1,
-- DEV_HWMON=2,
-- DEV_PARPORT=3,
-- DEV_RAID=4,
-- DEV_MAC_HID=5,
-- DEV_SCSI=6,
-- DEV_IPMI=7,
--};
--
--/* /proc/sys/dev/cdrom */
--enum {
-- DEV_CDROM_INFO=1,
-- DEV_CDROM_AUTOCLOSE=2,
-- DEV_CDROM_AUTOEJECT=3,
-- DEV_CDROM_DEBUG=4,
-- DEV_CDROM_LOCK=5,
-- DEV_CDROM_CHECK_MEDIA=6
--};
--
--/* /proc/sys/dev/parport */
--enum {
-- DEV_PARPORT_DEFAULT=-3
--};
--
--/* /proc/sys/dev/raid */
--enum {
-- DEV_RAID_SPEED_LIMIT_MIN=1,
-- DEV_RAID_SPEED_LIMIT_MAX=2
--};
--
--/* /proc/sys/dev/parport/default */
--enum {
-- DEV_PARPORT_DEFAULT_TIMESLICE=1,
-- DEV_PARPORT_DEFAULT_SPINTIME=2
--};
--
--/* /proc/sys/dev/parport/parport n */
--enum {
-- DEV_PARPORT_SPINTIME=1,
-- DEV_PARPORT_BASE_ADDR=2,
-- DEV_PARPORT_IRQ=3,
-- DEV_PARPORT_DMA=4,
-- DEV_PARPORT_MODES=5,
-- DEV_PARPORT_DEVICES=6,
-- DEV_PARPORT_AUTOPROBE=16
--};
--
--/* /proc/sys/dev/parport/parport n/devices/ */
--enum {
-- DEV_PARPORT_DEVICES_ACTIVE=-3,
--};
--
--/* /proc/sys/dev/parport/parport n/devices/device n */
--enum {
-- DEV_PARPORT_DEVICE_TIMESLICE=1,
--};
--
--/* /proc/sys/dev/mac_hid */
--enum {
-- DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES=1,
-- DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES=2,
-- DEV_MAC_HID_MOUSE_BUTTON_EMULATION=3,
-- DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE=4,
-- DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE=5,
-- DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES=6
--};
--
--/* /proc/sys/dev/scsi */
--enum {
-- DEV_SCSI_LOGGING_LEVEL=1,
--};
--
--/* /proc/sys/dev/ipmi */
--enum {
-- DEV_IPMI_POWEROFF_POWERCYCLE=1,
--};
--
--/* /proc/sys/abi */
--enum
--{
-- ABI_DEFHANDLER_COFF=1, /* default handler for coff binaries */
-- ABI_DEFHANDLER_ELF=2, /* default handler for ELF binaries */
-- ABI_DEFHANDLER_LCALL7=3,/* default handler for procs using lcall7 */
-- ABI_DEFHANDLER_LIBCSO=4,/* default handler for an libc.so ELF interp */
-- ABI_TRACE=5, /* tracing flags */
-- ABI_FAKE_UTSNAME=6, /* fake target utsname information */
--};
--
--#ifdef __KERNEL__
--#include <linux/list.h>
--
--/* For the /proc/sys support */
--struct ctl_table;
--struct nsproxy;
--struct ctl_table_root;
--
--struct ctl_table_set {
-- struct list_head list;
-- struct ctl_table_set *parent;
-- int (*is_seen)(struct ctl_table_set *);
--};
--
--extern void setup_sysctl_set(struct ctl_table_set *p,
-- struct ctl_table_set *parent,
-- int (*is_seen)(struct ctl_table_set *));
--
--struct ctl_table_header;
--
--extern void sysctl_head_get(struct ctl_table_header *);
--extern void sysctl_head_put(struct ctl_table_header *);
--extern int sysctl_is_seen(struct ctl_table_header *);
--extern struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *);
--extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev);
--extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces,
-- struct ctl_table_header *prev);
--extern void sysctl_head_finish(struct ctl_table_header *prev);
--extern int sysctl_perm(struct ctl_table_root *root,
-- struct ctl_table *table, int op);
--
--typedef struct ctl_table ctl_table;
--
--typedef int ctl_handler (struct ctl_table *table, int __user *name, int nlen,
-- void __user *oldval, size_t __user *oldlenp,
-- void __user *newval, size_t newlen);
--
--typedef int proc_handler (struct ctl_table *ctl, int write, struct file * filp,
-- void __user *buffer, size_t *lenp, loff_t *ppos);
--
--extern int proc_dostring(struct ctl_table *, int, struct file *,
-- void __user *, size_t *, loff_t *);
--extern int proc_dointvec(struct ctl_table *, int, struct file *,
-- void __user *, size_t *, loff_t *);
--extern int proc_dointvec_minmax(struct ctl_table *, int, struct file *,
-- void __user *, size_t *, loff_t *);
--extern int proc_dointvec_jiffies(struct ctl_table *, int, struct file *,
-- void __user *, size_t *, loff_t *);
--extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, struct file *,
-- void __user *, size_t *, loff_t *);
--extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, struct file *,
-- void __user *, size_t *, loff_t *);
--extern int proc_doulongvec_minmax(struct ctl_table *, int, struct file *,
-- void __user *, size_t *, loff_t *);
--extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
-- struct file *, void __user *, size_t *, loff_t *);
--
--extern int do_sysctl (int __user *name, int nlen,
-- void __user *oldval, size_t __user *oldlenp,
-- void __user *newval, size_t newlen);
--
--extern ctl_handler sysctl_data;
--extern ctl_handler sysctl_string;
--extern ctl_handler sysctl_intvec;
--extern ctl_handler sysctl_jiffies;
--extern ctl_handler sysctl_ms_jiffies;
--
--
--/*
-- * Register a set of sysctl names by calling register_sysctl_table
-- * with an initialised array of struct ctl_table's. An entry with zero
-- * ctl_name and NULL procname terminates the table. table->de will be
-- * set up by the registration and need not be initialised in advance.
-- *
-- * sysctl names can be mirrored automatically under /proc/sys. The
-- * procname supplied controls /proc naming.
-- *
-- * The table's mode will be honoured both for sys_sysctl(2) and
-- * proc-fs access.
-- *
-- * Leaf nodes in the sysctl tree will be represented by a single file
-- * under /proc; non-leaf nodes will be represented by directories. A
-- * null procname disables /proc mirroring at this node.
-- *
-- * sysctl entries with a zero ctl_name will not be available through
-- * the binary sysctl interface.
-- *
-- * sysctl(2) can automatically manage read and write requests through
-- * the sysctl table. The data and maxlen fields of the ctl_table
-- * struct enable minimal validation of the values being written,
-- * and the mode field allows minimal authentication.
-- *
-- * More sophisticated management can be enabled by the provision of a
-- * strategy routine with the table entry. This will be called before
-- * any automatic read or write of the data is performed.
-- *
-- * The strategy routine may return:
-- * <0: Error occurred (error is passed to user process)
-- * 0: OK - proceed with automatic read or write.
-- * >0: OK - read or write has been done by the strategy routine, so
-- * return immediately.
-- *
-- * There must be a proc_handler routine for any terminal nodes
-- * mirrored under /proc/sys (non-terminals are handled by a built-in
-- * directory handler). Several default handlers are available to
-- * cover common cases.
-- */
--
--/* A sysctl table is an array of struct ctl_table: */
--struct ctl_table
--{
-- int ctl_name; /* Binary ID */
-- const char *procname; /* Text ID for /proc/sys, or zero */
-- void *data;
-- int maxlen;
-- mode_t mode;
-- struct ctl_table *child;
-- struct ctl_table *parent; /* Automatically set */
-- proc_handler *proc_handler; /* Callback for text formatting */
-- ctl_handler *strategy; /* Callback function for all r/w */
-- void *extra1;
-- void *extra2;
--};
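
To make the registration comment above concrete, a hedged sketch of a minimal table as it would be used against this interface (the knob name and variable are illustrative):

/* Sketch: export an int as /proc/sys/kernel/my_knob. */
static int my_knob;

static struct ctl_table my_table[] = {
        {
                .ctl_name     = CTL_UNNUMBERED, /* no binary number */
                .procname     = "my_knob",
                .data         = &my_knob,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = &proc_dointvec,
        },
        {}      /* zero ctl_name and NULL procname end the table */
};

static struct ctl_path my_path[] = {
        { .procname = "kernel", .ctl_name = CTL_KERN },
        {}
};

/* In module init: header = register_sysctl_paths(my_path, my_table);
 * and unregister_sysctl_table(header) on exit. */
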
--
--struct ctl_table_root {
-- struct list_head root_list;
-- struct ctl_table_set default_set;
-- struct ctl_table_set *(*lookup)(struct ctl_table_root *root,
-- struct nsproxy *namespaces);
-- int (*permissions)(struct ctl_table_root *root,
-- struct nsproxy *namespaces, struct ctl_table *table);
--};
--
--/* struct ctl_table_header is used to maintain dynamic lists of
-- struct ctl_table trees. */
--struct ctl_table_header
--{
-- struct ctl_table *ctl_table;
-- struct list_head ctl_entry;
-- int used;
-- int count;
-- struct completion *unregistering;
-- struct ctl_table *ctl_table_arg;
-- struct ctl_table_root *root;
-- struct ctl_table_set *set;
-- struct ctl_table *attached_by;
-- struct ctl_table *attached_to;
-- struct ctl_table_header *parent;
--};
--
--/* struct ctl_path describes where in the hierarchy a table is added */
--struct ctl_path {
-- const char *procname;
-- int ctl_name;
--};
--
--void register_sysctl_root(struct ctl_table_root *root);
--struct ctl_table_header *__register_sysctl_paths(
-- struct ctl_table_root *root, struct nsproxy *namespaces,
-- const struct ctl_path *path, struct ctl_table *table);
--struct ctl_table_header *register_sysctl_table(struct ctl_table * table);
--struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
-- struct ctl_table *table);
--
--void unregister_sysctl_table(struct ctl_table_header * table);
--int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table);
--
--#endif /* __KERNEL__ */
--
--#endif /* _LINUX_SYSCTL_H */
-diff -Nurb linux-2.6.27-720/include/linux/textsearch.h linux-2.6.27-710/include/linux/textsearch.h
---- linux-2.6.27-720/include/linux/textsearch.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/linux/textsearch.h 2008-10-09 18:13:53.000000000 -0400
-@@ -162,9 +162,9 @@
- {
- struct ts_config *conf;
-
-- conf = (struct ts_config *) kzalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask);
-+ conf = kzalloc(TS_PRIV_ALIGN(sizeof(*conf)) + payload, gfp_mask);
- if (conf == NULL)
-- return (struct ts_config *) ERR_PTR(-ENOMEM);
-+ return ERR_PTR(-ENOMEM);
-
- return conf;
- }
-diff -Nurb linux-2.6.27-720/include/linux/types.h linux-2.6.27-710/include/linux/types.h
---- linux-2.6.27-720/include/linux/types.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/linux/types.h 2009-05-04 12:15:13.000000000 -0400
-@@ -30,9 +30,7 @@
- typedef __kernel_mqd_t mqd_t;
-
- #ifdef __KERNEL__
--#ifndef __cplusplus
- typedef _Bool bool;
--#endif
-
- typedef __kernel_uid32_t uid_t;
- typedef __kernel_gid32_t gid_t;
-@@ -211,12 +209,4 @@
-
- #endif /* __KERNEL__ */
-
--/*
-- * Click: Macros for defining empty structures. Needed because GCC's C and C++
-- * compilers have different ABIs for empty structures.
-- */
--
--#define EMPTY_STRUCT_DECL(s) struct s { int gcc_is_buggy; }
--#define EMPTY_STRUCT_INIT(s) (s) { 0 }
--
- #endif /* _LINUX_TYPES_H */
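
The macros removed above exist because GCC gives an empty structure size 0 in C but size 1 in C++ (where distinct objects need distinct addresses), so a header compiled both ways, as Click compiles these, would otherwise disagree on layout; the dummy member keeps the two ABIs in step. A small illustration:

/* Illustration only: the C vs. C++ empty-struct size mismatch.
 *   gcc: sizeof(struct empty) == 0   (GNU C extension)
 *   g++: sizeof(struct empty) == 1   (C++ minimum object size)
 * Padding makes both compilers agree: */
struct empty  { };
struct padded { int gcc_is_buggy; };    /* sizeof(int) everywhere */
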
-diff -Nurb linux-2.6.27-720/include/linux/unwind.h linux-2.6.27-710/include/linux/unwind.h
---- linux-2.6.27-720/include/linux/unwind.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/linux/unwind.h 2008-10-09 18:13:53.000000000 -0400
-@@ -14,7 +14,7 @@
-
- struct module;
-
--EMPTY_STRUCT_DECL(unwind_frame_info);
-+struct unwind_frame_info {};
-
- static inline void unwind_init(void) {}
- static inline void unwind_setup(void) {}
-diff -Nurb linux-2.6.27-720/include/linux/wait.h linux-2.6.27-710/include/linux/wait.h
---- linux-2.6.27-720/include/linux/wait.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/linux/wait.h 2008-10-09 18:13:53.000000000 -0400
-@@ -485,7 +485,7 @@
- static inline int wait_on_bit(void *word, int bit,
- int (*action)(void *), unsigned mode)
- {
-- if (!test_bit(bit, (volatile unsigned long *) word))
-+ if (!test_bit(bit, word))
- return 0;
- return out_of_line_wait_on_bit(word, bit, action, mode);
- }
-@@ -509,7 +509,7 @@
- static inline int wait_on_bit_lock(void *word, int bit,
- int (*action)(void *), unsigned mode)
- {
-- if (!test_and_set_bit(bit, (volatile unsigned long *) word))
-+ if (!test_and_set_bit(bit, word))
- return 0;
- return out_of_line_wait_on_bit_lock(word, bit, action, mode);
- }
-diff -Nurb linux-2.6.27-720/include/net/compat.h linux-2.6.27-710/include/net/compat.h
---- linux-2.6.27-720/include/net/compat.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/net/compat.h 2008-10-09 18:13:53.000000000 -0400
-@@ -33,9 +33,9 @@
-
- extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
- extern int verify_compat_iovec(struct msghdr *, struct iovec *, struct sockaddr *, int);
--asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned);
--asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned);
--asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *);
-+extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned);
-+extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned);
-+extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *);
- extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
-
- extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int);
-diff -Nurb linux-2.6.27-720/include/net/neighbour.h linux-2.6.27-710/include/net/neighbour.h
---- linux-2.6.27-720/include/net/neighbour.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/net/neighbour.h 2008-10-09 18:13:53.000000000 -0400
-@@ -275,7 +275,7 @@
- struct neigh_parms *p,
- int p_id, int pdev_id,
- char *p_name,
-- proc_handler_t *proc_handler,
-+ proc_handler *proc_handler,
- ctl_handler *strategy);
- extern void neigh_sysctl_unregister(struct neigh_parms *p);
-
-diff -Nurb linux-2.6.27-720/include/net/netlink.h linux-2.6.27-710/include/net/netlink.h
---- linux-2.6.27-720/include/net/netlink.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/net/netlink.h 2008-10-09 18:13:53.000000000 -0400
-@@ -315,7 +315,7 @@
- static inline struct nlattr *nlmsg_attrdata(const struct nlmsghdr *nlh,
- int hdrlen)
- {
-- unsigned char *data = (unsigned char *) nlmsg_data(nlh);
-+ unsigned char *data = nlmsg_data(nlh);
- return (struct nlattr *) (data + NLMSG_ALIGN(hdrlen));
- }
-
-@@ -732,7 +732,7 @@
- */
- static inline struct nlattr *nla_find_nested(struct nlattr *nla, int attrtype)
- {
-- return nla_find((struct nlattr *) nla_data(nla), nla_len(nla), attrtype);
-+ return nla_find(nla_data(nla), nla_len(nla), attrtype);
- }
-
- /**
-@@ -748,7 +748,7 @@
- struct nlattr *nla,
- const struct nla_policy *policy)
- {
-- return nla_parse(tb, maxtype, (struct nlattr *) nla_data(nla), nla_len(nla), policy);
-+ return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy);
- }
-
- /**
-@@ -775,7 +775,7 @@
- if (nested_len < 0)
- return -EINVAL;
- if (nested_len >= nla_attr_size(0))
-- return nla_parse(tb, maxtype, (struct nlattr *) nla_data(nla) + NLA_ALIGN(len),
-+ return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
- nested_len, policy);
- memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
- return 0;
-@@ -1069,7 +1069,7 @@
- */
- static inline int nla_nest_compat_end(struct sk_buff *skb, struct nlattr *start)
- {
-- struct nlattr *nest = (struct nlattr *) (char *)start + NLMSG_ALIGN(start->nla_len);
-+ struct nlattr *nest = (void *)start + NLMSG_ALIGN(start->nla_len);
-
- start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start;
- return nla_nest_end(skb, nest);
-@@ -1103,7 +1103,7 @@
- static inline int nla_validate_nested(struct nlattr *start, int maxtype,
- const struct nla_policy *policy)
- {
-- return nla_validate((struct nlattr *) nla_data(start), nla_len(start), maxtype, policy);
-+ return nla_validate(nla_data(start), nla_len(start), maxtype, policy);
- }
-
- /**
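
All of the netlink hunks in this file follow one pattern: nla_data() returns void *, which C passes straight to a struct nlattr * parameter, while the C++-compiled -720 tree must cast. For context, a hedged sketch of how a caller typically drives these helpers; the MYATTR_* names, nest_attr, and my_policy are invented for illustration:

    struct nlattr *tb[MYATTR_MAX + 1];   /* MYATTR_* are hypothetical */
    u16 port = 0;
    int err;

    err = nla_parse_nested(tb, MYATTR_MAX, nest_attr, my_policy);
    if (err < 0)
            return err;
    if (tb[MYATTR_PORT])
            port = nla_get_u16(tb[MYATTR_PORT]);
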
-diff -Nurb linux-2.6.27-720/include/net/pkt_cls.h linux-2.6.27-710/include/net/pkt_cls.h
---- linux-2.6.27-720/include/net/pkt_cls.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/net/pkt_cls.h 2008-10-09 18:13:53.000000000 -0400
-@@ -302,7 +302,9 @@
-
- #else /* CONFIG_NET_EMATCH */
-
--EMPTY_STRUCT_DECL(tcf_ematch_tree);
-+struct tcf_ematch_tree
-+{
-+};
-
- #define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
- #define tcf_em_tree_destroy(tp, t) do { (void)(t); } while(0)
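
EMPTY_STRUCT_DECL exists in the -720 tree because empty structs differ between the two languages: GNU C gives them size 0 (strict ISO C90 does not allow them at all), while C++ requires every object to have a distinct address and so gives them size 1. A standalone demonstration:

    #include <stdio.h>

    struct empty {};    /* GNU C extension; an ordinary class type in C++ */

    int main(void)
    {
            /* prints 0 when built with gcc, 1 when built with g++ */
            printf("%zu\n", sizeof(struct empty));
            return 0;
    }
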
-diff -Nurb linux-2.6.27-720/include/net/request_sock.h linux-2.6.27-710/include/net/request_sock.h
---- linux-2.6.27-720/include/net/request_sock.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/net/request_sock.h 2008-10-09 18:13:53.000000000 -0400
-@@ -60,7 +60,7 @@
-
- static inline struct request_sock *reqsk_alloc(const struct request_sock_ops *ops)
- {
-- struct request_sock *req = (struct request_sock *) kmem_cache_alloc(ops->slab, GFP_ATOMIC);
-+ struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC);
-
- if (req != NULL)
- req->rsk_ops = ops;
-diff -Nurb linux-2.6.27-720/include/net/route.h linux-2.6.27-710/include/net/route.h
---- linux-2.6.27-720/include/net/route.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/net/route.h 2009-05-04 12:15:13.000000000 -0400
-@@ -161,16 +161,6 @@
- __be16 sport, __be16 dport, struct sock *sk,
- int flags)
- {
--#ifdef __cplusplus
-- struct flowi fl;
-- fl.oif = oif;
-- fl.nl_u.ip4_u.daddr = dst;
-- fl.nl_u.ip4_u.saddr = src;
-- fl.nl_u.ip4_u.tos = tos;
-- fl.proto = protocol;
-- fl.uli_u.ports.sport = sport;
-- fl.uli_u.ports.dport = dport;
--#else
- struct flowi fl = { .oif = oif,
- .mark = sk->sk_mark,
- .nl_u = { .ip4_u = { .daddr = dst,
-@@ -180,7 +170,6 @@
- .uli_u = { .ports =
- { .sport = sport,
- .dport = dport } } };
--#endif
-
- int err;
- struct net *net = sock_net(sk);
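
This hunk shows the motivation for the whole exercise most clearly: the designated initializer that vanilla 2.6.27 uses for struct flowi is C99 syntax, which C++ compilers of this era reject, so the Click tree falls back to member-by-member assignment under #ifdef __cplusplus. Note in passing that the removed C++ branch never assigns fl.mark, while the initializer sets .mark = sk->sk_mark; that looks like an accidental divergence in the -720 tree. The same trade-off in miniature:

    struct point { int x, y; };

    void demo(void)
    {
            /* C99 designated initializer: rejected by pre-C++20 compilers. */
            struct point a = { .x = 1, .y = 2 };

            /* C++-safe fallback, as in the removed #ifdef branch. */
            struct point b;
            b.x = a.x;
            b.y = a.y;
    }
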
-diff -Nurb linux-2.6.27-720/include/net/sock.h linux-2.6.27-710/include/net/sock.h
---- linux-2.6.27-720/include/net/sock.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/net/sock.h 2009-05-04 12:15:13.000000000 -0400
-@@ -1104,13 +1104,13 @@
- {
- if (skb->ip_summed == CHECKSUM_NONE) {
- int err = 0;
-- __wsum csum = csum_and_copy_from_user((unsigned char *) from,
-- (unsigned char *) page_address(page) + off,
-+ __wsum csum = csum_and_copy_from_user(from,
-+ page_address(page) + off,
- copy, 0, &err);
- if (err)
- return err;
- skb->csum = csum_block_add(skb->csum, csum, skb->len);
-- } else if (copy_from_user((char *) page_address(page) + off, from, copy))
-+ } else if (copy_from_user(page_address(page) + off, from, copy))
- return -EFAULT;
-
- skb->len += copy;
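
Two C/C++ divergences meet in this hunk: page_address() returns void *, and the vanilla code both passes it where unsigned char * is expected and performs arithmetic on it. GNU C permits void * arithmetic (treating the pointee size as 1); ISO C and C++ do not, hence the casts in the -720 tree. A standalone sketch:

    void demo(void)
    {
            char buf[64];
            void *base = buf;

            char *p1 = (char *)base + 8;  /* portable in C and C++ */
            /* char *p2 = base + 8; */    /* GNU C extension; an error in C++ */

            (void)p1;
    }
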
-diff -Nurb linux-2.6.27-720/include/rdma/ib_user_verbs.h linux-2.6.27-710/include/rdma/ib_user_verbs.h
---- linux-2.6.27-720/include/rdma/ib_user_verbs.h 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/include/rdma/ib_user_verbs.h 2008-10-09 18:13:53.000000000 -0400
-@@ -504,7 +504,8 @@
- __u64 driver_data[0];
- };
-
--EMPTY_STRUCT_DECL(ib_uverbs_modify_qp_resp);
-+struct ib_uverbs_modify_qp_resp {
-+};
-
- struct ib_uverbs_destroy_qp {
- __u64 response;
-diff -Nurb linux-2.6.27-720/kernel/sched.c linux-2.6.27-710/kernel/sched.c
---- linux-2.6.27-720/kernel/sched.c 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/kernel/sched.c 2009-05-04 12:15:14.000000000 -0400
-@@ -6298,7 +6298,7 @@
- static void
- set_table_entry(struct ctl_table *entry,
- const char *procname, void *data, int maxlen,
-- mode_t mode, proc_handler_t *proc_handler)
-+ mode_t mode, proc_handler *proc_handler)
- {
- entry->procname = procname;
- entry->data = data;
-diff -Nurb linux-2.6.27-720/net/core/dev.c linux-2.6.27-710/net/core/dev.c
---- linux-2.6.27-720/net/core/dev.c 2009-05-04 12:19:35.000000000 -0400
-+++ linux-2.6.27-710/net/core/dev.c 2009-05-04 12:16:04.000000000 -0400
-@@ -252,9 +252,6 @@
- write_unlock_bh(&dev_base_lock);
- }
-
--/* Click: input packet handlers, might steal packets from net_rx_action. */
--static RAW_NOTIFIER_HEAD(net_in_chain);
--
- /*
- * Our notifier list
- */
-@@ -2024,31 +2021,6 @@
- return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
- }
-
--
--/*
-- * Click: Allow Click to ask to intercept input packets.
-- */
--int
--register_net_in(struct notifier_block *nb)
--{
-- int err;
-- rtnl_lock();
-- err = raw_notifier_chain_register(&net_in_chain, nb);
-- rtnl_unlock();
-- return err;
--}
--
--int
--unregister_net_in(struct notifier_block *nb)
--{
-- int err;
-- rtnl_lock();
-- err = raw_notifier_chain_unregister(&net_in_chain, nb);
-- rtnl_unlock();
-- return err;
--}
--
--
- #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
- /* These hooks defined here for ATM */
- struct net_bridge;
-@@ -2219,14 +2191,14 @@
- * NET_RX_SUCCESS: no congestion
- * NET_RX_DROP: packet was dropped
- */
--int __netif_receive_skb(struct sk_buff *skb)
-+int netif_receive_skb(struct sk_buff *skb)
- {
- struct packet_type *ptype, *pt_prev;
- struct net_device *orig_dev;
- struct net_device *null_or_orig;
- int ret = NET_RX_DROP;
- int *cur_elevator = &__get_cpu_var(sknid_elevator);
-- //__be16 type;
-+ __be16 type;
-
- *cur_elevator = 0;
-
-@@ -2255,14 +2227,6 @@
- skb_reset_transport_header(skb);
- skb->mac_len = skb->network_header - skb->mac_header;
-
-- /* Click: may want to steal the packet */
-- if (notifier_data >= 0
-- && raw_notifier_call_chain(&net_in_chain,
-- notifier_data,
-- skb) & NOTIFY_STOP_MASK) {
-- return ret;
-- }
--
- pt_prev = NULL;
-
- rcu_read_lock();
-@@ -2381,8 +2345,7 @@
- }
- local_irq_enable();
-
-- __netif_receive_skb(skb, skb->protocol, skb_queue_len(&queue->input_pkt_queue));
--//XXX netif_receive_skb(skb);
-+ netif_receive_skb(skb);
- } while (++work < quota && jiffies == start_time);
-
- return work;
-@@ -4935,7 +4898,6 @@
- EXPORT_SYMBOL(dev_get_by_index);
- EXPORT_SYMBOL(dev_get_by_name);
- EXPORT_SYMBOL(dev_open);
--EXPORT_SYMBOL(dev_ioctl);
- EXPORT_SYMBOL(dev_queue_xmit);
- EXPORT_SYMBOL(dev_remove_pack);
- EXPORT_SYMBOL(dev_set_allmulti);
-@@ -4948,16 +4910,10 @@
- EXPORT_SYMBOL(netdev_set_master);
- EXPORT_SYMBOL(netdev_state_change);
- EXPORT_SYMBOL(netif_receive_skb);
--EXPORT_SYMBOL(__netif_receive_skb);
- EXPORT_SYMBOL(netif_rx);
- EXPORT_SYMBOL(register_gifconf);
- EXPORT_SYMBOL(register_netdevice);
- EXPORT_SYMBOL(register_netdevice_notifier);
--
--/* Click */
--EXPORT_SYMBOL(register_net_in);
--EXPORT_SYMBOL(unregister_net_in);
--
- EXPORT_SYMBOL(skb_checksum_help);
- EXPORT_SYMBOL(synchronize_net);
- EXPORT_SYMBOL(unregister_netdevice);
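
This hunk is the functional core of the Click side of the patch, rather than C++ plumbing: the -720 tree adds a raw notifier chain (net_in_chain) that lets a registered handler inspect every received packet and steal it from net_rx_action before the stack sees it, and it renames netif_receive_skb() to __netif_receive_skb() with extra parameters. A hedged sketch of a consumer of the removed hook, based only on the deleted code above; the predicate and sink are hypothetical:

    static int my_net_in(struct notifier_block *nb, unsigned long val,
                         void *data)
    {
            struct sk_buff *skb = data;

            if (i_want_this_packet(skb)) {   /* hypothetical predicate */
                    consume_it(skb);         /* hypothetical sink; takes the skb */
                    return NOTIFY_STOP;      /* NOTIFY_STOP_MASK set: stack skips it */
            }
            return NOTIFY_DONE;              /* let normal processing continue */
    }

    static struct notifier_block my_nb = { .notifier_call = my_net_in };

    /* register_net_in(&my_nb);   ...   unregister_net_in(&my_nb); */

The remainder of the patch, from the next header on, simply deletes a stale dev.c.orig backup left in the -720 tree; its content, reproduced below, is the stock 2.6.27 dev.c.
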
-diff -Nurb linux-2.6.27-720/net/core/dev.c.orig linux-2.6.27-710/net/core/dev.c.orig
---- linux-2.6.27-720/net/core/dev.c.orig 2009-05-04 12:16:04.000000000 -0400
-+++ linux-2.6.27-710/net/core/dev.c.orig 1969-12-31 19:00:00.000000000 -0500
-@@ -1,4936 +0,0 @@
--/*
-- * NET3 Protocol independent device support routines.
-- *
-- * This program is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License
-- * as published by the Free Software Foundation; either version
-- * 2 of the License, or (at your option) any later version.
-- *
-- * Derived from the non IP parts of dev.c 1.0.19
-- * Authors: Ross Biro
-- * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
-- * Mark Evans, <evansmp@uhura.aston.ac.uk>
-- *
-- * Additional Authors:
-- * Florian la Roche <rzsfl@rz.uni-sb.de>
-- * Alan Cox <gw4pts@gw4pts.ampr.org>
-- * David Hinds <dahinds@users.sourceforge.net>
-- * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
-- * Adam Sulmicki <adam@cfar.umd.edu>
-- * Pekka Riikonen <priikone@poesidon.pspt.fi>
-- *
-- * Changes:
-- * D.J. Barrow : Fixed bug where dev->refcnt gets set
-- * to 2 if register_netdev gets called
-- * before net_dev_init & also removed a
-- * few lines of code in the process.
-- * Alan Cox : device private ioctl copies fields back.
-- * Alan Cox : Transmit queue code does relevant
-- * stunts to keep the queue safe.
-- * Alan Cox : Fixed double lock.
-- * Alan Cox : Fixed promisc NULL pointer trap
-- * ???????? : Support the full private ioctl range
-- * Alan Cox : Moved ioctl permission check into
-- * drivers
-- * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
-- * Alan Cox : 100 backlog just doesn't cut it when
-- * you start doing multicast video 8)
-- * Alan Cox : Rewrote net_bh and list manager.
-- * Alan Cox : Fix ETH_P_ALL echoback lengths.
-- * Alan Cox : Took out transmit every packet pass
-- * Saved a few bytes in the ioctl handler
-- * Alan Cox : Network driver sets packet type before
-- * calling netif_rx. Saves a function
-- * call a packet.
-- * Alan Cox : Hashed net_bh()
-- * Richard Kooijman: Timestamp fixes.
-- * Alan Cox : Wrong field in SIOCGIFDSTADDR
-- * Alan Cox : Device lock protection.
-- * Alan Cox : Fixed nasty side effect of device close
-- * changes.
-- * Rudi Cilibrasi : Pass the right thing to
-- * set_mac_address()
-- * Dave Miller : 32bit quantity for the device lock to
-- * make it work out on a Sparc.
-- * Bjorn Ekwall : Added KERNELD hack.
-- * Alan Cox : Cleaned up the backlog initialise.
-- * Craig Metz : SIOCGIFCONF fix if space for under
-- * 1 device.
-- * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
-- * is no device open function.
-- * Andi Kleen : Fix error reporting for SIOCGIFCONF
-- * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
-- * Cyrus Durgin : Cleaned for KMOD
-- * Adam Sulmicki : Bug Fix : Network Device Unload
-- * A network device unload needs to purge
-- * the backlog queue.
-- * Paul Rusty Russell : SIOCSIFNAME
-- * Pekka Riikonen : Netdev boot-time settings code
-- * Andrew Morton : Make unregister_netdevice wait
-- * indefinitely on dev->refcnt
-- * J Hadi Salim : - Backlog queue sampling
-- * - netif_rx() feedback
-- */
--
--#include <asm/uaccess.h>
--#include <asm/system.h>
--#include <linux/bitops.h>
--#include <linux/capability.h>
--#include <linux/cpu.h>
--#include <linux/types.h>
--#include <linux/kernel.h>
--#include <linux/sched.h>
--#include <linux/mutex.h>
--#include <linux/string.h>
--#include <linux/mm.h>
--#include <linux/socket.h>
--#include <linux/sockios.h>
--#include <linux/errno.h>
--#include <linux/interrupt.h>
--#include <linux/if_ether.h>
--#include <linux/netdevice.h>
--#include <linux/etherdevice.h>
--#include <linux/ethtool.h>
--#include <linux/notifier.h>
--#include <linux/skbuff.h>
--#include <net/net_namespace.h>
--#include <net/sock.h>
--#include <linux/rtnetlink.h>
--#include <linux/proc_fs.h>
--#include <linux/seq_file.h>
--#include <linux/stat.h>
--#include <linux/ip.h>
--#include <linux/tcp.h>
--#include <linux/if_bridge.h>
--#include <linux/if_macvlan.h>
--#include <net/dst.h>
--#include <net/pkt_sched.h>
--#include <net/checksum.h>
--#include <linux/highmem.h>
--#include <linux/init.h>
--#include <linux/kmod.h>
--#include <linux/module.h>
--#include <linux/kallsyms.h>
--#include <linux/netpoll.h>
--#include <linux/rcupdate.h>
--#include <linux/delay.h>
--#include <net/wext.h>
--#include <net/iw_handler.h>
--#include <asm/current.h>
--#include <linux/audit.h>
--#include <linux/dmaengine.h>
--#include <linux/err.h>
--#include <linux/ctype.h>
--#include <linux/if_arp.h>
--#include <linux/if_vlan.h>
--#include <linux/ip.h>
--#include <net/ip.h>
--#include <linux/ipv6.h>
--#include <linux/in.h>
--#include <linux/jhash.h>
--#include <linux/random.h>
--#include <linux/vs_inet.h>
--
--#include "net-sysfs.h"
--
--/*
-- * The list of packet types we will receive (as opposed to discard)
-- * and the routines to invoke.
-- *
-- * Why 16? Because with 16 the only overlap we get on a hash of the
-- * low nibble of the protocol value is RARP/SNAP/X.25.
-- *
-- * NOTE: That is no longer true with the addition of VLAN tags. Not
-- * sure which should go first, but I bet it won't make much
-- * difference if we are running VLANs. The good news is that
-- * this protocol won't be in the list unless compiled in, so
-- * the average user (w/out VLANs) will not be adversely affected.
-- * --BLG
-- *
-- * 0800 IP
-- * 8100 802.1Q VLAN
-- * 0001 802.3
-- * 0002 AX.25
-- * 0004 802.2
-- * 8035 RARP
-- * 0005 SNAP
-- * 0805 X.25
-- * 0806 ARP
-- * 8137 IPX
-- * 0009 Localtalk
-- * 86DD IPv6
-- */
--
--#define PTYPE_HASH_SIZE (16)
--#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
--
--static DEFINE_SPINLOCK(ptype_lock);
--static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
--static struct list_head ptype_all __read_mostly; /* Taps */
--
--#ifdef CONFIG_NET_DMA
--struct net_dma {
-- struct dma_client client;
-- spinlock_t lock;
-- cpumask_t channel_mask;
-- struct dma_chan **channels;
--};
--
--static enum dma_state_client
--netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-- enum dma_state state);
--
--static struct net_dma net_dma = {
-- .client = {
-- .event_callback = netdev_dma_event,
-- },
--};
--#endif
--
--/*
-- * The @dev_base_head list is protected by @dev_base_lock and the rtnl
-- * semaphore.
-- *
-- * Pure readers hold dev_base_lock for reading.
-- *
-- * Writers must hold the rtnl semaphore while they loop through the
-- * dev_base_head list, and hold dev_base_lock for writing when they do the
-- * actual updates. This allows pure readers to access the list even
-- * while a writer is preparing to update it.
-- *
-- * To put it another way, dev_base_lock is held for writing only to
-- * protect against pure readers; the rtnl semaphore provides the
-- * protection against other writers.
-- *
-- * See, for example usages, register_netdevice() and
-- * unregister_netdevice(), which must be called with the rtnl
-- * semaphore held.
-- */
--DEFINE_RWLOCK(dev_base_lock);
--
--EXPORT_SYMBOL(dev_base_lock);
--
--#define NETDEV_HASHBITS 8
--#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
--
--static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
--{
-- unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
-- return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
--}
--
--static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
--{
-- return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
--}
--
--/* Device list insertion */
--static int list_netdevice(struct net_device *dev)
--{
-- struct net *net = dev_net(dev);
--
-- ASSERT_RTNL();
--
-- write_lock_bh(&dev_base_lock);
-- list_add_tail(&dev->dev_list, &net->dev_base_head);
-- hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
-- hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
-- write_unlock_bh(&dev_base_lock);
-- return 0;
--}
--
--/* Device list removal */
--static void unlist_netdevice(struct net_device *dev)
--{
-- ASSERT_RTNL();
--
-- /* Unlink dev from the device chain */
-- write_lock_bh(&dev_base_lock);
-- list_del(&dev->dev_list);
-- hlist_del(&dev->name_hlist);
-- hlist_del(&dev->index_hlist);
-- write_unlock_bh(&dev_base_lock);
--}
--
--/*
-- * Our notifier list
-- */
--
--static RAW_NOTIFIER_HEAD(netdev_chain);
--
--/*
-- * Device drivers call our routines to queue packets here. We empty the
-- * queue in the local softnet handler.
-- */
--
--DEFINE_PER_CPU(struct softnet_data, softnet_data);
--
--#ifdef CONFIG_LOCKDEP
--/*
-- * register_netdevice() inits txq->_xmit_lock and sets lockdep class
-- * according to dev->type
-- */
--static const unsigned short netdev_lock_type[] =
-- {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
-- ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
-- ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
-- ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
-- ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
-- ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
-- ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
-- ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
-- ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
-- ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
-- ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
-- ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
-- ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
-- ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
-- ARPHRD_NONE};
--
--static const char *netdev_lock_name[] =
-- {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
-- "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
-- "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
-- "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
-- "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
-- "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
-- "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
-- "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
-- "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
-- "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
-- "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
-- "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
-- "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
-- "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
-- "_xmit_NONE"};
--
--static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
--static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
--
--static inline unsigned short netdev_lock_pos(unsigned short dev_type)
--{
-- int i;
--
-- for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
-- if (netdev_lock_type[i] == dev_type)
-- return i;
-- /* the last key is used by default */
-- return ARRAY_SIZE(netdev_lock_type) - 1;
--}
--
--static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-- unsigned short dev_type)
--{
-- int i;
--
-- i = netdev_lock_pos(dev_type);
-- lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
-- netdev_lock_name[i]);
--}
--
--static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
--{
-- int i;
--
-- i = netdev_lock_pos(dev->type);
-- lockdep_set_class_and_name(&dev->addr_list_lock,
-- &netdev_addr_lock_key[i],
-- netdev_lock_name[i]);
--}
--#else
--static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-- unsigned short dev_type)
--{
--}
--static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
--{
--}
--#endif
--
--/*******************************************************************************
--
-- Protocol management and registration routines
--
--*******************************************************************************/
--
--/*
-- * Add a protocol ID to the list. Now that the input handler is
-- * smarter we can dispense with all the messy stuff that used to be
-- * here.
-- *
-- * BEWARE!!! Protocol handlers that mangle input packets
-- * MUST BE last in the hash buckets, and the checking of protocol
-- * handlers MUST start from the promiscuous ptype_all chain in net_bh.
-- * This is true now; do not change it.
-- * Explanation: if a packet-mangling protocol handler were first on
-- * the list, it could not sense that the packet is cloned and should
-- * be copied-on-write; it would modify the packet in place and
-- * subsequent readers would see a broken packet.
-- * --ANK (980803)
-- */
--
--/**
-- * dev_add_pack - add packet handler
-- * @pt: packet type declaration
-- *
-- * Add a protocol handler to the networking stack. The passed &packet_type
-- * is linked into kernel lists and may not be freed until it has been
-- * removed from the kernel lists.
-- *
-- * This call does not sleep, therefore it cannot guarantee that
-- * all CPUs that are in the middle of receiving packets will see
-- * the new packet type (until the next received packet).
-- */
--
--void dev_add_pack(struct packet_type *pt)
--{
-- int hash;
--
-- spin_lock_bh(&ptype_lock);
-- if (pt->type == htons(ETH_P_ALL))
-- list_add_rcu(&pt->list, &ptype_all);
-- else {
-- hash = ntohs(pt->type) & PTYPE_HASH_MASK;
-- list_add_rcu(&pt->list, &ptype_base[hash]);
-- }
-- spin_unlock_bh(&ptype_lock);
--}
--
--/**
-- * __dev_remove_pack - remove packet handler
-- * @pt: packet type declaration
-- *
-- * Remove a protocol handler that was previously added to the kernel
-- * protocol handlers by dev_add_pack(). The passed &packet_type is removed
-- * from the kernel lists and can be freed or reused once this function
-- * returns.
-- *
-- * The packet type might still be in use by receivers
-- * and must not be freed until after all the CPUs have gone
-- * through a quiescent state.
-- */
--void __dev_remove_pack(struct packet_type *pt)
--{
-- struct list_head *head;
-- struct packet_type *pt1;
--
-- spin_lock_bh(&ptype_lock);
--
-- if (pt->type == htons(ETH_P_ALL))
-- head = &ptype_all;
-- else
-- head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
--
-- list_for_each_entry(pt1, head, list) {
-- if (pt == pt1) {
-- list_del_rcu(&pt->list);
-- goto out;
-- }
-- }
--
-- printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
--out:
-- spin_unlock_bh(&ptype_lock);
--}
--/**
-- * dev_remove_pack - remove packet handler
-- * @pt: packet type declaration
-- *
-- * Remove a protocol handler that was previously added to the kernel
-- * protocol handlers by dev_add_pack(). The passed &packet_type is removed
-- * from the kernel lists and can be freed or reused once this function
-- * returns.
-- *
-- * This call sleeps to guarantee that no CPU is looking at the packet
-- * type after return.
-- */
--void dev_remove_pack(struct packet_type *pt)
--{
-- __dev_remove_pack(pt);
--
-- synchronize_net();
--}
--
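
For reference, a hedged sketch of how a module drives the two routines above; the handler name and body are invented:

    static int my_rcv(struct sk_buff *skb, struct net_device *dev,
                      struct packet_type *pt, struct net_device *orig_dev)
    {
            /* inspect the packet, then release our reference */
            kfree_skb(skb);
            return 0;
    }

    static struct packet_type my_pt = {
            .type = __constant_htons(ETH_P_ALL),  /* tap every protocol */
            .func = my_rcv,
    };

    /* dev_add_pack(&my_pt);   ...   dev_remove_pack(&my_pt); */
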
--/******************************************************************************
--
-- Device Boot-time Settings Routines
--
--*******************************************************************************/
--
--/* Boot time configuration table */
--static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
--
--/**
-- * netdev_boot_setup_add - add new setup entry
-- * @name: name of the device
-- * @map: configured settings for the device
-- *
-- * Adds a new setup entry to the dev_boot_setup list. The function
-- * returns 0 on error and 1 on success. This is a generic routine for
-- * all netdevices.
-- */
--static int netdev_boot_setup_add(char *name, struct ifmap *map)
--{
-- struct netdev_boot_setup *s;
-- int i;
--
-- s = dev_boot_setup;
-- for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
-- if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
-- memset(s[i].name, 0, sizeof(s[i].name));
-- strlcpy(s[i].name, name, IFNAMSIZ);
-- memcpy(&s[i].map, map, sizeof(s[i].map));
-- break;
-- }
-- }
--
-- return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
--}
--
--/**
-- * netdev_boot_setup_check - check boot time settings
-- * @dev: the netdevice
-- *
-- * Check boot time settings for the device.
-- * The found settings are set for the device to be used
-- * later in the device probing.
-- * Returns 0 if no settings were found, 1 if they were.
-- */
--int netdev_boot_setup_check(struct net_device *dev)
--{
-- struct netdev_boot_setup *s = dev_boot_setup;
-- int i;
--
-- for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
-- if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
-- !strcmp(dev->name, s[i].name)) {
-- dev->irq = s[i].map.irq;
-- dev->base_addr = s[i].map.base_addr;
-- dev->mem_start = s[i].map.mem_start;
-- dev->mem_end = s[i].map.mem_end;
-- return 1;
-- }
-- }
-- return 0;
--}
--
--
--/**
-- * netdev_boot_base - get address from boot time settings
-- * @prefix: prefix for network device
-- * @unit: id for network device
-- *
-- * Check boot time settings for the base address of the device.
-- * The found settings are set for the device to be used
-- * later in the device probing.
-- * Returns 0 if no settings found.
-- */
--unsigned long netdev_boot_base(const char *prefix, int unit)
--{
-- const struct netdev_boot_setup *s = dev_boot_setup;
-- char name[IFNAMSIZ];
-- int i;
--
-- sprintf(name, "%s%d", prefix, unit);
--
-- /*
-- * If the device is already registered, return a base of 1
-- * to indicate not to probe for this interface
-- */
-- if (__dev_get_by_name(&init_net, name))
-- return 1;
--
-- for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
-- if (!strcmp(name, s[i].name))
-- return s[i].map.base_addr;
-- return 0;
--}
--
--/*
-- * Saves at boot time configured settings for any netdevice.
-- */
--int __init netdev_boot_setup(char *str)
--{
-- int ints[5];
-- struct ifmap map;
--
-- str = get_options(str, ARRAY_SIZE(ints), ints);
-- if (!str || !*str)
-- return 0;
--
-- /* Save settings */
-- memset(&map, 0, sizeof(map));
-- if (ints[0] > 0)
-- map.irq = ints[1];
-- if (ints[0] > 1)
-- map.base_addr = ints[2];
-- if (ints[0] > 2)
-- map.mem_start = ints[3];
-- if (ints[0] > 3)
-- map.mem_end = ints[4];
--
-- /* Add new entry to the list */
-- return netdev_boot_setup_add(str, &map);
--}
--
--__setup("netdev=", netdev_boot_setup);
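
For reference, the boot-time syntax this parser accepts is the classic form from Documentation/kernel-parameters.txt:

    netdev=<irq>,<io>,<mem_start>,<mem_end>,<name>

    e.g.  netdev=9,0x300,0,0,eth0
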
--
--/*******************************************************************************
--
-- Device Interface Subroutines
--
--*******************************************************************************/
--
--/**
-- * __dev_get_by_name - find a device by its name
-- * @net: the applicable net namespace
-- * @name: name to find
-- *
-- * Find an interface by name. Must be called under RTNL semaphore
-- * or @dev_base_lock. If the name is found a pointer to the device
-- * is returned. If the name is not found then %NULL is returned. The
-- * reference counters are not incremented so the caller must be
-- * careful with locks.
-- */
--
--struct net_device *__dev_get_by_name(struct net *net, const char *name)
--{
-- struct hlist_node *p;
--
-- hlist_for_each(p, dev_name_hash(net, name)) {
-- struct net_device *dev
-- = hlist_entry(p, struct net_device, name_hlist);
-- if (!strncmp(dev->name, name, IFNAMSIZ))
-- return dev;
-- }
-- return NULL;
--}
--
--/**
-- * dev_get_by_name - find a device by its name
-- * @net: the applicable net namespace
-- * @name: name to find
-- *
-- * Find an interface by name. This can be called from any
-- * context and does its own locking. The returned handle has
-- * the usage count incremented and the caller must use dev_put() to
-- * release it when it is no longer needed. %NULL is returned if no
-- * matching device is found.
-- */
--
--struct net_device *dev_get_by_name(struct net *net, const char *name)
--{
-- struct net_device *dev;
--
-- read_lock(&dev_base_lock);
-- dev = __dev_get_by_name(net, name);
-- if (dev)
-- dev_hold(dev);
-- read_unlock(&dev_base_lock);
-- return dev;
--}
--
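
A short usage sketch for the pair of lookup routines above (illustration only; error handling elided):

    struct net_device *dev = dev_get_by_name(&init_net, "eth0");

    if (dev) {
            printk(KERN_INFO "%s: ifindex %d\n", dev->name, dev->ifindex);
            dev_put(dev);   /* drop the reference dev_get_by_name() took */
    }
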
--/**
-- * __dev_get_by_index - find a device by its ifindex
-- * @net: the applicable net namespace
-- * @ifindex: index of device
-- *
-- * Search for an interface by index. Returns a pointer to the
-- * device, or %NULL if the device is not found. The device has not
-- * had its reference counter increased so the caller must be careful
-- * about locking. The caller must hold either the RTNL semaphore
-- * or @dev_base_lock.
-- */
--
--struct net_device *__dev_get_by_index(struct net *net, int ifindex)
--{
-- struct hlist_node *p;
--
-- hlist_for_each(p, dev_index_hash(net, ifindex)) {
-- struct net_device *dev
-- = hlist_entry(p, struct net_device, index_hlist);
-- if (dev->ifindex == ifindex)
-- return dev;
-- }
-- return NULL;
--}
--
--
--/**
-- * dev_get_by_index - find a device by its ifindex
-- * @net: the applicable net namespace
-- * @ifindex: index of device
-- *
-- * Search for an interface by index. Returns a pointer to the
-- * device, or NULL if the device is not found. The device returned has
-- * had a reference added and the pointer is safe until the user calls
-- * dev_put to indicate they have finished with it.
-- */
--
--struct net_device *dev_get_by_index(struct net *net, int ifindex)
--{
-- struct net_device *dev;
--
-- read_lock(&dev_base_lock);
-- dev = __dev_get_by_index(net, ifindex);
-- if (dev)
-- dev_hold(dev);
-- read_unlock(&dev_base_lock);
-- return dev;
--}
--
--/**
-- * dev_getbyhwaddr - find a device by its hardware address
-- * @net: the applicable net namespace
-- * @type: media type of device
-- * @ha: hardware address
-- *
-- * Search for an interface by MAC address. Returns a pointer to the
-- * device, or NULL if the device is not found. The caller must hold the
-- * rtnl semaphore. The returned device has not had its ref count increased
-- * and the caller must therefore be careful about locking
-- *
-- * BUGS:
-- * If the API was consistent this would be __dev_get_by_hwaddr
-- */
--
--struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
--{
-- struct net_device *dev;
--
-- ASSERT_RTNL();
--
-- for_each_netdev(net, dev)
-- if (dev->type == type &&
-- !memcmp(dev->dev_addr, ha, dev->addr_len))
-- return dev;
--
-- return NULL;
--}
--
--EXPORT_SYMBOL(dev_getbyhwaddr);
--
--struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
--{
-- struct net_device *dev;
--
-- ASSERT_RTNL();
-- for_each_netdev(net, dev)
-- if (dev->type == type)
-- return dev;
--
-- return NULL;
--}
--
--EXPORT_SYMBOL(__dev_getfirstbyhwtype);
--
--struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
--{
-- struct net_device *dev;
--
-- rtnl_lock();
-- dev = __dev_getfirstbyhwtype(net, type);
-- if (dev)
-- dev_hold(dev);
-- rtnl_unlock();
-- return dev;
--}
--
--EXPORT_SYMBOL(dev_getfirstbyhwtype);
--
--/**
-- * dev_get_by_flags - find any device with given flags
-- * @net: the applicable net namespace
-- * @if_flags: IFF_* values
-- * @mask: bitmask of bits in if_flags to check
-- *
-- * Search for any interface with the given flags. Returns a pointer to
-- * the device, or NULL if no matching device is found. The device returned has
-- * had a reference added and the pointer is safe until the user calls
-- * dev_put to indicate they have finished with it.
-- */
--
--struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
--{
-- struct net_device *dev, *ret;
--
-- ret = NULL;
-- read_lock(&dev_base_lock);
-- for_each_netdev(net, dev) {
-- if (((dev->flags ^ if_flags) & mask) == 0) {
-- dev_hold(dev);
-- ret = dev;
-- break;
-- }
-- }
-- read_unlock(&dev_base_lock);
-- return ret;
--}
--
--/**
-- * dev_valid_name - check if name is okay for network device
-- * @name: name string
-- *
-- * Network device names need to be valid file names
-- * to allow sysfs to work. We also disallow any kind of
-- * whitespace.
-- */
--int dev_valid_name(const char *name)
--{
-- if (*name == '\0')
-- return 0;
-- if (strlen(name) >= IFNAMSIZ)
-- return 0;
-- if (!strcmp(name, ".") || !strcmp(name, ".."))
-- return 0;
--
-- while (*name) {
-- if (*name == '/' || isspace(*name))
-- return 0;
-- name++;
-- }
-- return 1;
--}
--
--/**
-- * __dev_alloc_name - allocate a name for a device
-- * @net: network namespace to allocate the device name in
-- * @name: name format string
-- * @buf: scratch buffer and result name string
-- *
-- * Passed a format string - e.g. "lt%d" - it will try to find a suitable
-- * id. It scans the list of devices to build up a free map, then chooses
-- * the first empty slot. The caller must hold the dev_base or rtnl lock
-- * while allocating the name and adding the device in order to avoid
-- * duplicates.
-- * Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
-- * Returns the number of the unit assigned or a negative errno code.
-- */
--
--static int __dev_alloc_name(struct net *net, const char *name, char *buf)
--{
-- int i = 0;
-- const char *p;
-- const int max_netdevices = 8*PAGE_SIZE;
-- unsigned long *inuse;
-- struct net_device *d;
--
-- p = strnchr(name, IFNAMSIZ-1, '%');
-- if (p) {
-- /*
-- * Verify the string as this thing may have come from
-- * the user. There must be either one "%d" and no other "%"
-- * characters.
-- */
-- if (p[1] != 'd' || strchr(p + 2, '%'))
-- return -EINVAL;
--
-- /* Use one page as a bit array of possible slots */
-- inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
-- if (!inuse)
-- return -ENOMEM;
--
-- for_each_netdev(net, d) {
-- if (!sscanf(d->name, name, &i))
-- continue;
-- if (i < 0 || i >= max_netdevices)
-- continue;
--
-- /* avoid cases where sscanf is not exact inverse of printf */
-- snprintf(buf, IFNAMSIZ, name, i);
-- if (!strncmp(buf, d->name, IFNAMSIZ))
-- set_bit(i, inuse);
-- }
--
-- i = find_first_zero_bit(inuse, max_netdevices);
-- free_page((unsigned long) inuse);
-- }
--
-- snprintf(buf, IFNAMSIZ, name, i);
-- if (!__dev_get_by_name(net, buf))
-- return i;
--
-- /* It is possible to run out of possible slots
-- * when the name is long and there isn't enough space left
-- * for the digits, or if all bits are used.
-- */
-- return -ENFILE;
--}
--
--/**
-- * dev_alloc_name - allocate a name for a device
-- * @dev: device
-- * @name: name format string
-- *
-- * Passed a format string - e.g. "lt%d" - it will try to find a suitable
-- * id. It scans the list of devices to build up a free map, then chooses
-- * the first empty slot. The caller must hold the dev_base or rtnl lock
-- * while allocating the name and adding the device in order to avoid
-- * duplicates.
-- * Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
-- * Returns the number of the unit assigned or a negative errno code.
-- */
--
--int dev_alloc_name(struct net_device *dev, const char *name)
--{
-- char buf[IFNAMSIZ];
-- struct net *net;
-- int ret;
--
-- BUG_ON(!dev_net(dev));
-- net = dev_net(dev);
-- ret = __dev_alloc_name(net, name, buf);
-- if (ret >= 0)
-- strlcpy(dev->name, buf, IFNAMSIZ);
-- return ret;
--}
--
--
--/**
-- * dev_change_name - change name of a device
-- * @dev: device
-- * @newname: name (or format string) must be at least IFNAMSIZ
-- *
-- * Change the name of a device. Format strings such as "eth%d"
-- * can be passed for wildcarding.
-- */
--int dev_change_name(struct net_device *dev, char *newname)
--{
-- char oldname[IFNAMSIZ];
-- int err = 0;
-- int ret;
-- struct net *net;
--
-- ASSERT_RTNL();
-- BUG_ON(!dev_net(dev));
--
-- net = dev_net(dev);
-- if (dev->flags & IFF_UP)
-- return -EBUSY;
--
-- if (!dev_valid_name(newname))
-- return -EINVAL;
--
-- if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
-- return 0;
--
-- memcpy(oldname, dev->name, IFNAMSIZ);
--
-- if (strchr(newname, '%')) {
-- err = dev_alloc_name(dev, newname);
-- if (err < 0)
-- return err;
-- strcpy(newname, dev->name);
-- }
-- else if (__dev_get_by_name(net, newname))
-- return -EEXIST;
-- else
-- strlcpy(dev->name, newname, IFNAMSIZ);
--
--rollback:
-- /* For now only devices in the initial network namespace
-- * are in sysfs.
-- */
-- if (net == &init_net) {
-- ret = device_rename(&dev->dev, dev->name);
-- if (ret) {
-- memcpy(dev->name, oldname, IFNAMSIZ);
-- return ret;
-- }
-- }
--
-- write_lock_bh(&dev_base_lock);
-- hlist_del(&dev->name_hlist);
-- hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
-- write_unlock_bh(&dev_base_lock);
--
-- ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
-- ret = notifier_to_errno(ret);
--
-- if (ret) {
-- if (err) {
-- printk(KERN_ERR
-- "%s: name change rollback failed: %d.\n",
-- dev->name, ret);
-- } else {
-- err = ret;
-- memcpy(dev->name, oldname, IFNAMSIZ);
-- goto rollback;
-- }
-- }
--
-- return err;
--}
--
--/**
-- * netdev_features_change - device changes features
-- * @dev: device to cause notification
-- *
-- * Called to indicate a device has changed features.
-- */
--void netdev_features_change(struct net_device *dev)
--{
-- call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
--}
--EXPORT_SYMBOL(netdev_features_change);
--
--/**
-- * netdev_state_change - device changes state
-- * @dev: device to cause notification
-- *
-- * Called to indicate a device has changed state. This function calls
-- * the notifier chains for netdev_chain and sends a NEWLINK message
-- * to the routing socket.
-- */
--void netdev_state_change(struct net_device *dev)
--{
-- if (dev->flags & IFF_UP) {
-- call_netdevice_notifiers(NETDEV_CHANGE, dev);
-- rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
-- }
--}
--
--void netdev_bonding_change(struct net_device *dev)
--{
-- call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
--}
--EXPORT_SYMBOL(netdev_bonding_change);
--
--/**
-- * dev_load - load a network module
-- * @net: the applicable net namespace
-- * @name: name of interface
-- *
-- * If a network interface is not present and the process has suitable
-- * privileges, this function loads the module. If module loading is not
-- * available in this kernel then it becomes a nop.
-- */
--
--void dev_load(struct net *net, const char *name)
--{
-- struct net_device *dev;
--
-- read_lock(&dev_base_lock);
-- dev = __dev_get_by_name(net, name);
-- read_unlock(&dev_base_lock);
--
-- if (!dev && capable(CAP_SYS_MODULE))
-- request_module("%s", name);
--}
--
--/**
-- * dev_open - prepare an interface for use.
-- * @dev: device to open
-- *
-- * Takes a device from down to up state. The device's private open
-- * function is invoked and then the multicast lists are loaded. Finally
-- * the device is moved into the up state and a %NETDEV_UP message is
-- * sent to the netdev notifier chain.
-- *
-- * Calling this function on an active interface is a nop. On a failure
-- * a negative errno code is returned.
-- */
--int dev_open(struct net_device *dev)
--{
-- int ret = 0;
--
-- ASSERT_RTNL();
--
-- /*
-- * Is it already up?
-- */
--
-- if (dev->flags & IFF_UP)
-- return 0;
--
-- /*
-- * Is it even present?
-- */
-- if (!netif_device_present(dev))
-- return -ENODEV;
--
-- /*
-- * Call device private open method
-- */
-- set_bit(__LINK_STATE_START, &dev->state);
--
-- if (dev->validate_addr)
-- ret = dev->validate_addr(dev);
--
-- if (!ret && dev->open)
-- ret = dev->open(dev);
--
-- /*
-- * If it went open OK then:
-- */
--
-- if (ret)
-- clear_bit(__LINK_STATE_START, &dev->state);
-- else {
-- /*
-- * Set the flags.
-- */
-- dev->flags |= IFF_UP;
--
-- /*
-- * Initialize multicasting status
-- */
-- dev_set_rx_mode(dev);
--
-- /*
-- * Wakeup transmit queue engine
-- */
-- dev_activate(dev);
--
-- /*
-- * ... and announce new interface.
-- */
-- call_netdevice_notifiers(NETDEV_UP, dev);
-- }
--
-- return ret;
--}
--
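
A minimal sketch of calling dev_open() from kernel code; per the comment above it is a nop on an already-up interface, and it must run under the RTNL lock:

    int err;

    rtnl_lock();
    err = dev_open(dev);   /* returns 0 if the device is already IFF_UP */
    rtnl_unlock();
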
--/**
-- * dev_close - shutdown an interface.
-- * @dev: device to shutdown
-- *
-- * This function moves an active device into down state. A
-- * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
-- * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
-- * chain.
-- */
--int dev_close(struct net_device *dev)
--{
-- ASSERT_RTNL();
--
-- might_sleep();
--
-- if (!(dev->flags & IFF_UP))
-- return 0;
--
-- /*
-- * Tell people we are going down, so that they can
-- * prepare for death while the device is still operating.
-- */
-- call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
--
-- clear_bit(__LINK_STATE_START, &dev->state);
--
--	/* Synchronize to the scheduled poll. We cannot touch the poll list;
--	 * it can even be on a different cpu. So just clear netif_running().
-- *
--	 * dev->stop() will invoke napi_disable() on all of its
-- * napi_struct instances on this device.
-- */
-- smp_mb__after_clear_bit(); /* Commit netif_running(). */
--
-- dev_deactivate(dev);
--
-- /*
-- * Call the device specific close. This cannot fail.
-- * Only if device is UP
-- *
-- * We allow it to be called even after a DETACH hot-plug
-- * event.
-- */
-- if (dev->stop)
-- dev->stop(dev);
--
-- /*
-- * Device is now down.
-- */
--
-- dev->flags &= ~IFF_UP;
--
-- /*
-- * Tell people we are down
-- */
-- call_netdevice_notifiers(NETDEV_DOWN, dev);
--
-- return 0;
--}
--
--
--/**
-- * dev_disable_lro - disable Large Receive Offload on a device
-- * @dev: device
-- *
-- * Disable Large Receive Offload (LRO) on a net device. Must be
-- * called under RTNL. This is needed if received packets may be
-- * forwarded to another interface.
-- */
--void dev_disable_lro(struct net_device *dev)
--{
-- if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
-- dev->ethtool_ops->set_flags) {
-- u32 flags = dev->ethtool_ops->get_flags(dev);
-- if (flags & ETH_FLAG_LRO) {
-- flags &= ~ETH_FLAG_LRO;
-- dev->ethtool_ops->set_flags(dev, flags);
-- }
-- }
-- WARN_ON(dev->features & NETIF_F_LRO);
--}
--EXPORT_SYMBOL(dev_disable_lro);
--
--
--static int dev_boot_phase = 1;
--
--/*
-- * Device change register/unregister. These are not inline or static
-- * as we export them to the world.
-- */
--
--/**
-- * register_netdevice_notifier - register a network notifier block
-- * @nb: notifier
-- *
-- * Register a notifier to be called when network device events occur.
-- * The notifier passed is linked into the kernel structures and must
-- * not be reused until it has been unregistered. A negative errno code
-- * is returned on a failure.
-- *
-- * When registered, all registration and up events are replayed
-- * to the new notifier to allow it to have a race-free
-- * view of the network device list.
-- */
--
--int register_netdevice_notifier(struct notifier_block *nb)
--{
-- struct net_device *dev;
-- struct net_device *last;
-- struct net *net;
-- int err;
--
-- rtnl_lock();
-- err = raw_notifier_chain_register(&netdev_chain, nb);
-- if (err)
-- goto unlock;
-- if (dev_boot_phase)
-- goto unlock;
-- for_each_net(net) {
-- for_each_netdev(net, dev) {
-- err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
-- err = notifier_to_errno(err);
-- if (err)
-- goto rollback;
--
-- if (!(dev->flags & IFF_UP))
-- continue;
--
-- nb->notifier_call(nb, NETDEV_UP, dev);
-- }
-- }
--
--unlock:
-- rtnl_unlock();
-- return err;
--
--rollback:
-- last = dev;
-- for_each_net(net) {
-- for_each_netdev(net, dev) {
-- if (dev == last)
-- break;
--
-- if (dev->flags & IFF_UP) {
-- nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
-- nb->notifier_call(nb, NETDEV_DOWN, dev);
-- }
-- nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
-- }
-- }
--
-- raw_notifier_chain_unregister(&netdev_chain, nb);
-- goto unlock;
--}
--
--/**
-- * unregister_netdevice_notifier - unregister a network notifier block
-- * @nb: notifier
-- *
-- * Unregister a notifier previously registered by
-- * register_netdevice_notifier(). The notifier is unlinked from the
-- * kernel structures and may then be reused. A negative errno code
-- * is returned on a failure.
-- */
--
--int unregister_netdevice_notifier(struct notifier_block *nb)
--{
-- int err;
--
-- rtnl_lock();
-- err = raw_notifier_chain_unregister(&netdev_chain, nb);
-- rtnl_unlock();
-- return err;
--}
--
--/**
-- * call_netdevice_notifiers - call all network notifier blocks
-- * @val: value passed unmodified to notifier function
-- * @dev: net_device pointer passed unmodified to notifier function
-- *
-- * Call all network notifier blocks. Parameters and return value
-- * are as for raw_notifier_call_chain().
-- */
--
--int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
--{
-- return raw_notifier_call_chain(&netdev_chain, val, dev);
--}
--
--/* When > 0 there are consumers of rx skb time stamps */
--static atomic_t netstamp_needed = ATOMIC_INIT(0);
--
--void net_enable_timestamp(void)
--{
-- atomic_inc(&netstamp_needed);
--}
--
--void net_disable_timestamp(void)
--{
-- atomic_dec(&netstamp_needed);
--}
--
--static inline void net_timestamp(struct sk_buff *skb)
--{
-- if (atomic_read(&netstamp_needed))
-- __net_timestamp(skb);
-- else
-- skb->tstamp.tv64 = 0;
--}
--
--/*
-- * Support routine. Sends outgoing frames to any network
-- * taps currently in use.
-- */
--
--static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
--{
-- struct packet_type *ptype;
--
-- net_timestamp(skb);
--
-- rcu_read_lock();
-- list_for_each_entry_rcu(ptype, &ptype_all, list) {
-- /* Never send packets back to the socket
-- * they originated from - MvS (miquels@drinkel.ow.org)
-- */
-- if ((ptype->dev == dev || !ptype->dev) &&
-- (ptype->af_packet_priv == NULL ||
-- (struct sock *)ptype->af_packet_priv != skb->sk)) {
-- struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
-- if (!skb2)
-- break;
--
--			/* skb->nh should be correctly set by the sender,
--			   so that the second statement is just protection
--			   against buggy protocols.
--			 */
-- skb_reset_mac_header(skb2);
--
-- if (skb_network_header(skb2) < skb2->data ||
-- skb2->network_header > skb2->tail) {
-- if (net_ratelimit())
-- printk(KERN_CRIT "protocol %04x is "
-- "buggy, dev %s\n",
-- skb2->protocol, dev->name);
-- skb_reset_network_header(skb2);
-- }
--
-- skb2->transport_header = skb2->network_header;
-- skb2->pkt_type = PACKET_OUTGOING;
-- ptype->func(skb2, skb->dev, ptype, skb->dev);
-- }
-- }
-- rcu_read_unlock();
--}
--
--
--static inline void __netif_reschedule(struct Qdisc *q)
--{
-- struct softnet_data *sd;
-- unsigned long flags;
--
-- local_irq_save(flags);
-- sd = &__get_cpu_var(softnet_data);
-- q->next_sched = sd->output_queue;
-- sd->output_queue = q;
-- raise_softirq_irqoff(NET_TX_SOFTIRQ);
-- local_irq_restore(flags);
--}
--
--void __netif_schedule(struct Qdisc *q)
--{
-- if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
-- __netif_reschedule(q);
--}
--EXPORT_SYMBOL(__netif_schedule);
--
--void dev_kfree_skb_irq(struct sk_buff *skb)
--{
-- if (atomic_dec_and_test(&skb->users)) {
-- struct softnet_data *sd;
-- unsigned long flags;
--
-- local_irq_save(flags);
-- sd = &__get_cpu_var(softnet_data);
-- skb->next = sd->completion_queue;
-- sd->completion_queue = skb;
-- raise_softirq_irqoff(NET_TX_SOFTIRQ);
-- local_irq_restore(flags);
-- }
--}
--EXPORT_SYMBOL(dev_kfree_skb_irq);
--
--void dev_kfree_skb_any(struct sk_buff *skb)
--{
-- if (in_irq() || irqs_disabled())
-- dev_kfree_skb_irq(skb);
-- else
-- dev_kfree_skb(skb);
--}
--EXPORT_SYMBOL(dev_kfree_skb_any);
--
--
--/**
-- * netif_device_detach - mark device as removed
-- * @dev: network device
-- *
-- * Mark device as removed from system and therefore no longer available.
-- */
--void netif_device_detach(struct net_device *dev)
--{
-- if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
-- netif_running(dev)) {
-- netif_stop_queue(dev);
-- }
--}
--EXPORT_SYMBOL(netif_device_detach);
--
--/**
-- * netif_device_attach - mark device as attached
-- * @dev: network device
-- *
-- * Mark device as attached from system and restart if needed.
-- */
--void netif_device_attach(struct net_device *dev)
--{
-- if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
-- netif_running(dev)) {
-- netif_wake_queue(dev);
-- __netdev_watchdog_up(dev);
-- }
--}
--EXPORT_SYMBOL(netif_device_attach);
--
--static bool can_checksum_protocol(unsigned long features, __be16 protocol)
--{
-- return ((features & NETIF_F_GEN_CSUM) ||
-- ((features & NETIF_F_IP_CSUM) &&
-- protocol == htons(ETH_P_IP)) ||
-- ((features & NETIF_F_IPV6_CSUM) &&
-- protocol == htons(ETH_P_IPV6)));
--}
--
--static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
--{
-- if (can_checksum_protocol(dev->features, skb->protocol))
-- return true;
--
-- if (skb->protocol == htons(ETH_P_8021Q)) {
-- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-- if (can_checksum_protocol(dev->features & dev->vlan_features,
-- veh->h_vlan_encapsulated_proto))
-- return true;
-- }
--
-- return false;
--}
--
--/*
-- * Invalidate hardware checksum when packet is to be mangled, and
-- * complete checksum manually on outgoing path.
-- */
--int skb_checksum_help(struct sk_buff *skb)
--{
-- __wsum csum;
-- int ret = 0, offset;
--
-- if (skb->ip_summed == CHECKSUM_COMPLETE)
-- goto out_set_summed;
--
-- if (unlikely(skb_shinfo(skb)->gso_size)) {
-- /* Let GSO fix up the checksum. */
-- goto out_set_summed;
-- }
--
-- offset = skb->csum_start - skb_headroom(skb);
-- BUG_ON(offset >= skb_headlen(skb));
-- csum = skb_checksum(skb, offset, skb->len - offset, 0);
--
-- offset += skb->csum_offset;
-- BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
--
-- if (skb_cloned(skb) &&
-- !skb_clone_writable(skb, offset + sizeof(__sum16))) {
-- ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-- if (ret)
-- goto out;
-- }
--
-- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
--out_set_summed:
-- skb->ip_summed = CHECKSUM_NONE;
--out:
-- return ret;
--}
--
--/**
-- * skb_gso_segment - Perform segmentation on skb.
-- * @skb: buffer to segment
-- * @features: features for the output path (see dev->features)
-- *
-- * This function segments the given skb and returns a list of segments.
-- *
-- * It may return NULL if the skb requires no segmentation. This is
-- * only possible when GSO is used for verifying header integrity.
-- */
--struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
--{
-- struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
-- struct packet_type *ptype;
-- __be16 type = skb->protocol;
-- int err;
--
-- BUG_ON(skb_shinfo(skb)->frag_list);
--
-- skb_reset_mac_header(skb);
-- skb->mac_len = skb->network_header - skb->mac_header;
-- __skb_pull(skb, skb->mac_len);
--
-- if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
-- if (skb_header_cloned(skb) &&
-- (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
-- return ERR_PTR(err);
-- }
--
-- rcu_read_lock();
-- list_for_each_entry_rcu(ptype,
-- &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
-- if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
-- if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
-- err = ptype->gso_send_check(skb);
-- segs = ERR_PTR(err);
-- if (err || skb_gso_ok(skb, features))
-- break;
-- __skb_push(skb, (skb->data -
-- skb_network_header(skb)));
-- }
-- segs = ptype->gso_segment(skb, features);
-- break;
-- }
-- }
-- rcu_read_unlock();
--
-- __skb_push(skb, skb->data - skb_mac_header(skb));
--
-- return segs;
--}
--
--EXPORT_SYMBOL(skb_gso_segment);
--
--/* Take action when hardware reception checksum errors are detected. */
--#ifdef CONFIG_BUG
--void netdev_rx_csum_fault(struct net_device *dev)
--{
-- if (net_ratelimit()) {
-- printk(KERN_ERR "%s: hw csum failure.\n",
-- dev ? dev->name : "<unknown>");
-- dump_stack();
-- }
--}
--EXPORT_SYMBOL(netdev_rx_csum_fault);
--#endif
--
--/* Actually, we should eliminate this check as soon as we know that:
-- * 1. an IOMMU is present and allows mapping all the memory.
-- * 2. No high memory really exists on this machine.
-- */
--
--static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
--{
--#ifdef CONFIG_HIGHMEM
-- int i;
--
-- if (dev->features & NETIF_F_HIGHDMA)
-- return 0;
--
-- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-- if (PageHighMem(skb_shinfo(skb)->frags[i].page))
-- return 1;
--
--#endif
-- return 0;
--}
--
--struct dev_gso_cb {
-- void (*destructor)(struct sk_buff *skb);
--};
--
--#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
--
--static void dev_gso_skb_destructor(struct sk_buff *skb)
--{
-- struct dev_gso_cb *cb;
--
-- do {
-- struct sk_buff *nskb = skb->next;
--
-- skb->next = nskb->next;
-- nskb->next = NULL;
-- kfree_skb(nskb);
-- } while (skb->next);
--
-- cb = DEV_GSO_CB(skb);
-- if (cb->destructor)
-- cb->destructor(skb);
--}
--
--/**
-- * dev_gso_segment - Perform emulated hardware segmentation on skb.
-- * @skb: buffer to segment
-- *
-- * This function segments the given skb and stores the list of segments
-- * in skb->next.
-- */
--static int dev_gso_segment(struct sk_buff *skb)
--{
-- struct net_device *dev = skb->dev;
-- struct sk_buff *segs;
-- int features = dev->features & ~(illegal_highdma(dev, skb) ?
-- NETIF_F_SG : 0);
--
-- segs = skb_gso_segment(skb, features);
--
-- /* Verifying header integrity only. */
-- if (!segs)
-- return 0;
--
-- if (IS_ERR(segs))
-- return PTR_ERR(segs);
--
-- skb->next = segs;
-- DEV_GSO_CB(skb)->destructor = skb->destructor;
-- skb->destructor = dev_gso_skb_destructor;
--
-- return 0;
--}
--
--int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-- struct netdev_queue *txq)
--{
-- if (likely(!skb->next)) {
-- if (!list_empty(&ptype_all))
-- dev_queue_xmit_nit(skb, dev);
--
-- if (netif_needs_gso(dev, skb)) {
-- if (unlikely(dev_gso_segment(skb)))
-- goto out_kfree_skb;
-- if (skb->next)
-- goto gso;
-- }
--
-- return dev->hard_start_xmit(skb, dev);
-- }
--
--gso:
-- do {
-- struct sk_buff *nskb = skb->next;
-- int rc;
--
-- skb->next = nskb->next;
-- nskb->next = NULL;
-- rc = dev->hard_start_xmit(nskb, dev);
-- if (unlikely(rc)) {
-- nskb->next = skb->next;
-- skb->next = nskb;
-- return rc;
-- }
-- if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
-- return NETDEV_TX_BUSY;
-- } while (skb->next);
--
-- skb->destructor = DEV_GSO_CB(skb)->destructor;
--
--out_kfree_skb:
-- kfree_skb(skb);
-- return 0;
--}
--
--static u32 simple_tx_hashrnd;
--static int simple_tx_hashrnd_initialized = 0;
--
--static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
--{
-- u32 addr1, addr2, ports;
-- u32 hash, ihl;
-- u8 ip_proto = 0;
--
-- if (unlikely(!simple_tx_hashrnd_initialized)) {
-- get_random_bytes(&simple_tx_hashrnd, 4);
-- simple_tx_hashrnd_initialized = 1;
-- }
--
-- switch (skb->protocol) {
-- case __constant_htons(ETH_P_IP):
-- if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
-- ip_proto = ip_hdr(skb)->protocol;
-- addr1 = ip_hdr(skb)->saddr;
-- addr2 = ip_hdr(skb)->daddr;
-- ihl = ip_hdr(skb)->ihl;
-- break;
-- case __constant_htons(ETH_P_IPV6):
-- ip_proto = ipv6_hdr(skb)->nexthdr;
-- addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
-- addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
-- ihl = (40 >> 2);
-- break;
-- default:
-- return 0;
-- }
--
--
-- switch (ip_proto) {
-- case IPPROTO_TCP:
-- case IPPROTO_UDP:
-- case IPPROTO_DCCP:
-- case IPPROTO_ESP:
-- case IPPROTO_AH:
-- case IPPROTO_SCTP:
-- case IPPROTO_UDPLITE:
-- ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
-- break;
--
-- default:
-- ports = 0;
-- break;
-- }
--
-- hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
--
-- return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
--}
--
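
The final line of simple_tx_hash() is the standard multiply-shift trick for mapping a 32-bit hash onto n queues without a modulo: hash * n is a 64-bit value below n * 2^32, so its upper 32 bits fall in [0, n). Two spot checks:

    /* hash = 0xffffffff, n = 4:  (0xffffffffULL * 4) >> 32 == 3 */
    /* hash = 0x40000000, n = 4:  (0x40000000ULL * 4) >> 32 == 1 */
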
--static struct netdev_queue *dev_pick_tx(struct net_device *dev,
-- struct sk_buff *skb)
--{
-- u16 queue_index = 0;
--
-- if (dev->select_queue)
-- queue_index = dev->select_queue(dev, skb);
-- else if (dev->real_num_tx_queues > 1)
-- queue_index = simple_tx_hash(dev, skb);
--
-- skb_set_queue_mapping(skb, queue_index);
-- return netdev_get_tx_queue(dev, queue_index);
--}
--
--/**
-- * dev_queue_xmit - transmit a buffer
-- * @skb: buffer to transmit
-- *
-- * Queue a buffer for transmission to a network device. The caller must
-- * have set the device and priority and built the buffer before calling
-- * this function. The function can be called from an interrupt.
-- *
-- * A negative errno code is returned on a failure. A success does not
-- * guarantee the frame will be transmitted as it may be dropped due
-- * to congestion or traffic shaping.
-- *
-- * -----------------------------------------------------------------------------------
-- * I notice this method can also return errors from the queue disciplines,
-- * including NET_XMIT_DROP, which is a positive value. So, errors can also
-- * be positive.
-- *
-- * Regardless of the return value, the skb is consumed, so it is currently
-- * difficult to retry a send to this method. (You can bump the ref count
-- * before sending to hold a reference for retry if you are careful.)
-- *
-- * When calling this method, interrupts MUST be enabled. This is because
-- * the BH enable code must have IRQs enabled so that it will not deadlock.
-- * --BLG
-- */
--int dev_queue_xmit(struct sk_buff *skb)
--{
-- struct net_device *dev = skb->dev;
-- struct netdev_queue *txq;
-- struct Qdisc *q;
-- int rc = -ENOMEM;
--
-- /* GSO will handle the following emulations directly. */
-- if (netif_needs_gso(dev, skb))
-- goto gso;
--
-- if (skb_shinfo(skb)->frag_list &&
-- !(dev->features & NETIF_F_FRAGLIST) &&
-- __skb_linearize(skb))
-- goto out_kfree_skb;
--
--	/* A fragmented skb is linearized if the device does not support SG,
--	 * or if at least one of the fragments is in highmem and the device
--	 * does not support DMA from it.
-- */
-- if (skb_shinfo(skb)->nr_frags &&
-- (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
-- __skb_linearize(skb))
-- goto out_kfree_skb;
--
-- /* If packet is not checksummed and device does not support
-- * checksumming for this protocol, complete checksumming here.
-- */
-- if (skb->ip_summed == CHECKSUM_PARTIAL) {
-- skb_set_transport_header(skb, skb->csum_start -
-- skb_headroom(skb));
-- if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
-- goto out_kfree_skb;
-- }
--
--gso:
-- /* Disable soft irqs for various locks below. Also
-- * stops preemption for RCU.
-- */
-- rcu_read_lock_bh();
--
-- txq = dev_pick_tx(dev, skb);
-- q = rcu_dereference(txq->qdisc);
--
--#ifdef CONFIG_NET_CLS_ACT
-- skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
--#endif
-- if (q->enqueue) {
-- spinlock_t *root_lock = qdisc_lock(q);
--
-- spin_lock(root_lock);
--
-- if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
-- kfree_skb(skb);
-- rc = NET_XMIT_DROP;
-- } else {
-- rc = qdisc_enqueue_root(skb, q);
-- qdisc_run(q);
-- }
-- spin_unlock(root_lock);
--
-- goto out;
-- }
--
-- /* The device has no queue. This is the common case for software
-- devices: loopback, all sorts of tunnels...
--
-- Really, it is unlikely that netif_tx_lock protection is necessary
-- here. (E.g. loopback and IP tunnels are clean, ignoring statistics
-- counters.)
-- However, it is possible that they rely on the protection
-- we provide here.
--
-- Check this and take the lock. It is not prone to deadlocks.
-- Or take the noqueue qdisc path; it is even simpler 8)
-- */
-- if (dev->flags & IFF_UP) {
-- int cpu = smp_processor_id(); /* ok because BHs are off */
--
-- if (txq->xmit_lock_owner != cpu) {
--
-- HARD_TX_LOCK(dev, txq, cpu);
--
-- if (!netif_tx_queue_stopped(txq)) {
-- rc = 0;
-- if (!dev_hard_start_xmit(skb, dev, txq)) {
-- HARD_TX_UNLOCK(dev, txq);
-- goto out;
-- }
-- }
-- HARD_TX_UNLOCK(dev, txq);
-- if (net_ratelimit())
-- printk(KERN_CRIT "Virtual device %s asks to "
-- "queue packet!\n", dev->name);
-- } else {
-- /* Recursion detected! It is possible,
-- * unfortunately. */
-- if (net_ratelimit())
-- printk(KERN_CRIT "Dead loop on virtual device "
-- "%s, fix it urgently!\n", dev->name);
-- }
-- }
--
-- rc = -ENETDOWN;
-- rcu_read_unlock_bh();
--
--out_kfree_skb:
-- kfree_skb(skb);
-- return rc;
--out:
-- rcu_read_unlock_bh();
-- return rc;
--}
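--
--/* Editor's sketch, not part of the original source: one way a caller might
-- * classify dev_queue_xmit()'s return value, per the comment above --
-- * negative values are -errno, positive values are qdisc verdicts such as
-- * NET_XMIT_DROP, and 0 means the skb was queued or transmitted. The
-- * constants below mirror the kernel's values for illustration only.
-- */
--#include <stdio.h>
--
--#define NET_XMIT_SUCCESS 0
--#define NET_XMIT_DROP 1 /* skb dropped by the qdisc */
--#define NET_XMIT_CN 2 /* congestion notification */
--
--static const char *xmit_status(int rc)
--{
-- if (rc < 0)
-- return "error (-errno); the skb was still consumed";
-- if (rc == NET_XMIT_SUCCESS)
-- return "queued or transmitted";
-- return "positive qdisc verdict (NET_XMIT_DROP, NET_XMIT_CN, ...)";
--}
--
--int main(void)
--{
-- int samples[] = { 0, 1, 2, -100 /* -ENETDOWN */ };
-- int i;
--
-- for (i = 0; i < 4; i++)
-- printf("rc=%d: %s\n", samples[i], xmit_status(samples[i]));
-- return 0;
--}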
--
--
--/*=======================================================================
-- Receiver routines
-- =======================================================================*/
--
--int netdev_max_backlog __read_mostly = 1000;
--int netdev_budget __read_mostly = 300;
--int weight_p __read_mostly = 64; /* old backlog weight */
--
--DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
--
--
--/**
-- * netif_rx - post buffer to the network code
-- * @skb: buffer to post
-- *
-- * This function receives a packet from a device driver and queues it for
-- * the upper (protocol) levels to process. It always succeeds. The buffer
-- * may be dropped during processing for congestion control or by the
-- * protocol layers.
-- *
-- * return values:
-- * NET_RX_SUCCESS (no congestion)
-- * NET_RX_DROP (packet was dropped)
-- *
-- */
--
--int netif_rx(struct sk_buff *skb)
--{
-- struct softnet_data *queue;
-- unsigned long flags;
--
-- /* if netpoll wants it, pretend we never saw it */
-- if (netpoll_rx(skb))
-- return NET_RX_DROP;
--
-- if (!skb->tstamp.tv64)
-- net_timestamp(skb);
--
-- /*
-- * The code is arranged so that the path is shortest when the
-- * CPU is congested but still operating.
-- */
-- local_irq_save(flags);
-- queue = &__get_cpu_var(softnet_data);
--
-- __get_cpu_var(netdev_rx_stat).total++;
-- if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
-- if (queue->input_pkt_queue.qlen) {
--enqueue:
-- __skb_queue_tail(&queue->input_pkt_queue, skb);
-- local_irq_restore(flags);
-- return NET_RX_SUCCESS;
-- }
--
-- napi_schedule(&queue->backlog);
-- goto enqueue;
-- }
--
-- __get_cpu_var(netdev_rx_stat).dropped++;
-- local_irq_restore(flags);
--
-- kfree_skb(skb);
-- return NET_RX_DROP;
--}
--
--int netif_rx_ni(struct sk_buff *skb)
--{
-- int err;
--
-- preempt_disable();
-- err = netif_rx(skb);
-- if (local_softirq_pending())
-- do_softirq();
-- preempt_enable();
--
-- return err;
--}
--
--EXPORT_SYMBOL(netif_rx_ni);
--
--static void net_tx_action(struct softirq_action *h)
--{
-- struct softnet_data *sd = &__get_cpu_var(softnet_data);
--
-- if (sd->completion_queue) {
-- struct sk_buff *clist;
--
-- local_irq_disable();
-- clist = sd->completion_queue;
-- sd->completion_queue = NULL;
-- local_irq_enable();
--
-- while (clist) {
-- struct sk_buff *skb = clist;
-- clist = clist->next;
--
-- WARN_ON(atomic_read(&skb->users));
-- __kfree_skb(skb);
-- }
-- }
--
-- if (sd->output_queue) {
-- struct Qdisc *head;
--
-- local_irq_disable();
-- head = sd->output_queue;
-- sd->output_queue = NULL;
-- local_irq_enable();
--
-- while (head) {
-- struct Qdisc *q = head;
-- spinlock_t *root_lock;
--
-- head = head->next_sched;
--
-- root_lock = qdisc_lock(q);
-- if (spin_trylock(root_lock)) {
-- smp_mb__before_clear_bit();
-- clear_bit(__QDISC_STATE_SCHED,
-- &q->state);
-- qdisc_run(q);
-- spin_unlock(root_lock);
-- } else {
-- if (!test_bit(__QDISC_STATE_DEACTIVATED,
-- &q->state)) {
-- __netif_reschedule(q);
-- } else {
-- smp_mb__before_clear_bit();
-- clear_bit(__QDISC_STATE_SCHED,
-- &q->state);
-- }
-- }
-- }
-- }
--}
--
--static inline int deliver_skb(struct sk_buff *skb,
-- struct packet_type *pt_prev,
-- struct net_device *orig_dev)
--{
-- atomic_inc(&skb->users);
-- return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
--}
--
--#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
--/* These hooks defined here for ATM */
--struct net_bridge;
--struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
-- unsigned char *addr);
--void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
--
--/*
-- * If bridge module is loaded call bridging hook.
-- * returns NULL if packet was consumed.
-- */
--struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
-- struct sk_buff *skb) __read_mostly;
--static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
-- struct packet_type **pt_prev, int *ret,
-- struct net_device *orig_dev)
--{
-- struct net_bridge_port *port;
--
-- if (skb->pkt_type == PACKET_LOOPBACK ||
-- (port = rcu_dereference(skb->dev->br_port)) == NULL)
-- return skb;
--
-- if (*pt_prev) {
-- *ret = deliver_skb(skb, *pt_prev, orig_dev);
-- *pt_prev = NULL;
-- }
--
-- return br_handle_frame_hook(port, skb);
--}
--#else
--#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
--#endif
--
--#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
--struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
--EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
--
--static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
-- struct packet_type **pt_prev,
-- int *ret,
-- struct net_device *orig_dev)
--{
-- if (skb->dev->macvlan_port == NULL)
-- return skb;
--
-- if (*pt_prev) {
-- *ret = deliver_skb(skb, *pt_prev, orig_dev);
-- *pt_prev = NULL;
-- }
-- return macvlan_handle_frame_hook(skb);
--}
--#else
--#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
--#endif
--
--#ifdef CONFIG_NET_CLS_ACT
--/* TODO: Maybe we should just force sch_ingress to be compiled in
-- * whenever CONFIG_NET_CLS_ACT is? Otherwise we pay a few useless
-- * instructions (a compare and two extra stores) when CONFIG_NET_CLS_ACT
-- * is enabled but the ingress scheduler is not.
-- * NOTE: This doesn't remove any functionality; if you don't have
-- * the ingress scheduler, you just can't add policies on ingress.
-- */
--static int ing_filter(struct sk_buff *skb)
--{
-- struct net_device *dev = skb->dev;
-- u32 ttl = G_TC_RTTL(skb->tc_verd);
-- struct netdev_queue *rxq;
-- int result = TC_ACT_OK;
-- struct Qdisc *q;
--
-- if (MAX_RED_LOOP < ttl++) {
-- printk(KERN_WARNING
-- "Redir loop detected Dropping packet (%d->%d)\n",
-- skb->iif, dev->ifindex);
-- return TC_ACT_SHOT;
-- }
--
-- skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
-- skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
--
-- rxq = &dev->rx_queue;
--
-- q = rxq->qdisc;
-- if (q != &noop_qdisc) {
-- spin_lock(qdisc_lock(q));
-- if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
-- result = qdisc_enqueue_root(skb, q);
-- spin_unlock(qdisc_lock(q));
-- }
--
-- return result;
--}
--
--static inline struct sk_buff *handle_ing(struct sk_buff *skb,
-- struct packet_type **pt_prev,
-- int *ret, struct net_device *orig_dev)
--{
-- if (skb->dev->rx_queue.qdisc == &noop_qdisc)
-- goto out;
--
-- if (*pt_prev) {
-- *ret = deliver_skb(skb, *pt_prev, orig_dev);
-- *pt_prev = NULL;
-- } else {
-- /* Huh? Why does turning on AF_PACKET affect this? */
-- skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
-- }
--
-- switch (ing_filter(skb)) {
-- case TC_ACT_SHOT:
-- case TC_ACT_STOLEN:
-- kfree_skb(skb);
-- return NULL;
-- }
--
--out:
-- skb->tc_verd = 0;
-- return skb;
--}
--#endif
--
--/*
-- * netif_nit_deliver - deliver received packets to network taps
-- * @skb: buffer
-- *
-- * This function is used to deliver incoming packets to network
-- * taps. It should be used when the normal netif_receive_skb path
-- * is bypassed, for example because of VLAN acceleration.
-- */
--void netif_nit_deliver(struct sk_buff *skb)
--{
-- struct packet_type *ptype;
--
-- if (list_empty(&ptype_all))
-- return;
--
-- skb_reset_network_header(skb);
-- skb_reset_transport_header(skb);
-- skb->mac_len = skb->network_header - skb->mac_header;
--
-- rcu_read_lock();
-- list_for_each_entry_rcu(ptype, &ptype_all, list) {
-- if (!ptype->dev || ptype->dev == skb->dev)
-- deliver_skb(skb, ptype, skb->dev);
-- }
-- rcu_read_unlock();
--}
--
--/* The code already makes the assumption that packet handlers run
-- * sequentially on the same CPU. -Sapan */
--DEFINE_PER_CPU(int, sknid_elevator) = 0;
--
--/**
-- * netif_receive_skb - process receive buffer from network
-- * @skb: buffer to process
-- *
-- * netif_receive_skb() is the main receive data processing function.
-- * It always succeeds. The buffer may be dropped during processing
-- * for congestion control or by the protocol layers.
-- *
-- * This function may only be called from softirq context and interrupts
-- * should be enabled.
-- *
-- * Return values (usually ignored):
-- * NET_RX_SUCCESS: no congestion
-- * NET_RX_DROP: packet was dropped
-- */
--int netif_receive_skb(struct sk_buff *skb)
--{
-- struct packet_type *ptype, *pt_prev;
-- struct net_device *orig_dev;
-- struct net_device *null_or_orig;
-- int ret = NET_RX_DROP;
-- int *cur_elevator = &__get_cpu_var(sknid_elevator);
-- __be16 type;
--
-- *cur_elevator = 0;
--
-- /* if we've gotten here through NAPI, check netpoll */
-- if (netpoll_receive_skb(skb))
-- return NET_RX_DROP;
--
-- if (!skb->tstamp.tv64)
-- net_timestamp(skb);
--
-- if (!skb->iif)
-- skb->iif = skb->dev->ifindex;
--
-- null_or_orig = NULL;
-- orig_dev = skb->dev;
-- if (orig_dev->master) {
-- if (skb_bond_should_drop(skb))
-- null_or_orig = orig_dev; /* deliver only exact match */
-- else
-- skb->dev = orig_dev->master;
-- }
--
-- __get_cpu_var(netdev_rx_stat).total++;
--
-- skb_reset_network_header(skb);
-- skb_reset_transport_header(skb);
-- skb->mac_len = skb->network_header - skb->mac_header;
--
-- pt_prev = NULL;
--
-- rcu_read_lock();
--
-- /* Don't receive packets in an exiting network namespace */
-- if (!net_alive(dev_net(skb->dev)))
-- goto out;
--
--#ifdef CONFIG_NET_CLS_ACT
-- if (skb->tc_verd & TC_NCLS) {
-- skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
-- goto ncls;
-- }
--#endif
--
-- list_for_each_entry_rcu(ptype, &ptype_all, list) {
-- if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
-- ptype->dev == orig_dev) {
-- if (pt_prev)
-- ret = deliver_skb(skb, pt_prev, orig_dev);
-- pt_prev = ptype;
-- }
-- }
--
--#ifdef CONFIG_NET_CLS_ACT
-- skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
-- if (!skb)
-- goto out;
--ncls:
--#endif
--
-- skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
-- if (!skb)
-- goto out;
-- skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
-- if (!skb)
-- goto out;
--
-- type = skb->protocol;
-- list_for_each_entry_rcu(ptype,
-- &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
-- if (ptype->type == type &&
-- (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
-- ptype->dev == orig_dev)) {
-- if (pt_prev)
-- ret = deliver_skb(skb, pt_prev, orig_dev);
-- pt_prev = ptype;
-- }
-- }
--
-- if (pt_prev) {
-- /* At this point, cur_elevator may be -2 or a positive value if
-- * a previous protocol handler has marked it. */
-- if (*cur_elevator) {
-- atomic_inc(&skb->users);
-- }
--
-- ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
--
-- if ((*cur_elevator)>0) {
-- skb->skb_tag = *cur_elevator;
-- list_for_each_entry_rcu(ptype, &ptype_all, list) {
-- if ((!ptype->dev || ptype->dev == skb->dev) && (ptype->sknid_elevator)) {
-- ret = deliver_skb(skb, ptype, orig_dev);
-- }
-- }
-- }
--
-- if (*cur_elevator) {
-- /* Drop the extra reference taken before the handler ran */
-- kfree_skb(skb);
-- }
-- } else {
-- kfree_skb(skb);
-- /* Jamal, now you will not be able to escape explaining
-- * to me how you were going to use this. :-)
-- */
-- ret = NET_RX_DROP;
-- }
--
--out:
-- rcu_read_unlock();
-- return ret;
--}
--
--/* Network device is going away, flush any packets still pending */
--static void flush_backlog(void *arg)
--{
-- struct net_device *dev = arg;
-- struct softnet_data *queue = &__get_cpu_var(softnet_data);
-- struct sk_buff *skb, *tmp;
--
-- skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
-- if (skb->dev == dev) {
-- __skb_unlink(skb, &queue->input_pkt_queue);
-- kfree_skb(skb);
-- }
--}
--
--static int process_backlog(struct napi_struct *napi, int quota)
--{
-- int work = 0;
-- struct softnet_data *queue = &__get_cpu_var(softnet_data);
-- unsigned long start_time = jiffies;
--
-- napi->weight = weight_p;
-- do {
-- struct sk_buff *skb;
--
-- local_irq_disable();
-- skb = __skb_dequeue(&queue->input_pkt_queue);
-- if (!skb) {
-- __napi_complete(napi);
-- local_irq_enable();
-- break;
-- }
-- local_irq_enable();
--
-- netif_receive_skb(skb);
-- } while (++work < quota && jiffies == start_time);
--
-- return work;
--}
--
--/**
-- * __napi_schedule - schedule for receive
-- * @n: entry to schedule
-- *
-- * The entry's receive function will be scheduled to run
-- */
--void __napi_schedule(struct napi_struct *n)
--{
-- unsigned long flags;
--
-- local_irq_save(flags);
-- list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
-- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
-- local_irq_restore(flags);
--}
--EXPORT_SYMBOL(__napi_schedule);
--
--
--static void net_rx_action(struct softirq_action *h)
--{
-- struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
-- unsigned long start_time = jiffies;
-- int budget = netdev_budget;
-- void *have;
--
-- local_irq_disable();
--
-- while (!list_empty(list)) {
-- struct napi_struct *n;
-- int work, weight;
--
-- /* If the softirq window is exhausted then punt.
-- *
-- * Note that this is a slight policy change from the
-- * previous NAPI code, which would allow up to 2
-- * jiffies to pass before breaking out. The test
-- * used to be "jiffies - start_time > 1".
-- */
-- if (unlikely(budget <= 0 || jiffies != start_time))
-- goto softnet_break;
--
-- local_irq_enable();
--
-- /* Even though interrupts have been re-enabled, this
-- * access is safe because interrupts can only add new
-- * entries to the tail of this list, and only ->poll()
-- * calls can remove this head entry from the list.
-- */
-- n = list_entry(list->next, struct napi_struct, poll_list);
--
-- have = netpoll_poll_lock(n);
--
-- weight = n->weight;
--
-- /* This NAPI_STATE_SCHED test is for avoiding a race
-- * with netpoll's poll_napi(). Only the entity which
-- * obtains the lock and sees NAPI_STATE_SCHED set will
-- * actually make the ->poll() call. Therefore we avoid
-- * accidentally calling ->poll() when NAPI is not scheduled.
-- */
-- work = 0;
-- if (test_bit(NAPI_STATE_SCHED, &n->state))
-- work = n->poll(n, weight);
--
-- WARN_ON_ONCE(work > weight);
--
-- budget -= work;
--
-- local_irq_disable();
--
-- /* Drivers must not modify the NAPI state if they
-- * consume the entire weight. In such cases this code
-- * still "owns" the NAPI instance and therefore can
-- * move the instance around on the list at-will.
-- */
-- if (unlikely(work == weight)) {
-- if (unlikely(napi_disable_pending(n)))
-- __napi_complete(n);
-- else
-- list_move_tail(&n->poll_list, list);
-- }
--
-- netpoll_poll_unlock(have);
-- }
--out:
-- local_irq_enable();
--
--#ifdef CONFIG_NET_DMA
-- /*
-- * There may not be any more sk_buffs coming right now, so push
-- * any pending DMA copies to hardware
-- */
-- if (!cpus_empty(net_dma.channel_mask)) {
-- int chan_idx;
-- for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
-- struct dma_chan *chan = net_dma.channels[chan_idx];
-- if (chan)
-- dma_async_memcpy_issue_pending(chan);
-- }
-- }
--#endif
--
-- return;
--
--softnet_break:
-- __get_cpu_var(netdev_rx_stat).time_squeeze++;
-- __raise_softirq_irqoff(NET_RX_SOFTIRQ);
-- goto out;
--}
--
--static gifconf_func_t * gifconf_list [NPROTO];
--
--/**
-- * register_gifconf - register a SIOCGIF handler
-- * @family: Address family
-- * @gifconf: Function handler
-- *
-- * Register protocol dependent address dumping routines. The handler
-- * that is passed must not be freed or reused until it has been replaced
-- * by another handler.
-- */
--int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
--{
-- if (family >= NPROTO)
-- return -EINVAL;
-- gifconf_list[family] = gifconf;
-- return 0;
--}
--
--
--/*
-- * Map an interface index to its name (SIOCGIFNAME)
-- */
--
--/*
-- * We need this ioctl for efficient implementation of the
-- * if_indextoname() function required by the IPv6 API. Without
-- * it, we would have to search all the interfaces to find a
-- * match. --pb
-- */
--
--static int dev_ifname(struct net *net, struct ifreq __user *arg)
--{
-- struct net_device *dev;
-- struct ifreq ifr;
--
-- /*
-- * Fetch the caller's info block.
-- */
--
-- if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
-- return -EFAULT;
--
-- read_lock(&dev_base_lock);
-- dev = __dev_get_by_index(net, ifr.ifr_ifindex);
-- if (!dev) {
-- read_unlock(&dev_base_lock);
-- return -ENODEV;
-- }
--
-- strcpy(ifr.ifr_name, dev->name);
-- read_unlock(&dev_base_lock);
--
-- if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
-- return -EFAULT;
-- return 0;
--}
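--
--/* Editor's sketch, not part of the original source: the userspace side of
-- * the SIOCGIFNAME ioctl implemented above, mapping an interface index to
-- * its name the way if_indextoname() does. Index 1 is only a guess (it is
-- * usually "lo"); error handling is minimal.
-- */
--#include <stdio.h>
--#include <stdlib.h>
--#include <string.h>
--#include <unistd.h>
--#include <sys/ioctl.h>
--#include <sys/socket.h>
--#include <net/if.h>
--
--int main(int argc, char **argv)
--{
-- struct ifreq ifr;
-- int fd = socket(AF_INET, SOCK_DGRAM, 0);
--
-- if (fd < 0) {
-- perror("socket");
-- return 1;
-- }
-- memset(&ifr, 0, sizeof(ifr));
-- ifr.ifr_ifindex = argc > 1 ? atoi(argv[1]) : 1;
-- if (ioctl(fd, SIOCGIFNAME, &ifr) < 0) {
-- perror("SIOCGIFNAME");
-- close(fd);
-- return 1;
-- }
-- printf("ifindex %d -> %s\n", ifr.ifr_ifindex, ifr.ifr_name);
-- close(fd);
-- return 0;
--}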
--
--/*
-- * Perform a SIOCGIFCONF call. This structure will change
-- * size eventually, and there is nothing I can do about it.
-- * Thus we will need a 'compatibility mode'.
-- */
--
--static int dev_ifconf(struct net *net, char __user *arg)
--{
-- struct ifconf ifc;
-- struct net_device *dev;
-- char __user *pos;
-- int len;
-- int total;
-- int i;
--
-- /*
-- * Fetch the caller's info block.
-- */
--
-- if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
-- return -EFAULT;
--
-- pos = ifc.ifc_buf;
-- len = ifc.ifc_len;
--
-- /*
-- * Loop over the interfaces, and write an info block for each.
-- */
--
-- total = 0;
-- for_each_netdev(net, dev) {
-- if (!nx_dev_visible(current->nx_info, dev))
-- continue;
-- for (i = 0; i < NPROTO; i++) {
-- if (gifconf_list[i]) {
-- int done;
-- if (!pos)
-- done = gifconf_list[i](dev, NULL, 0);
-- else
-- done = gifconf_list[i](dev, pos + total,
-- len - total);
-- if (done < 0)
-- return -EFAULT;
-- total += done;
-- }
-- }
-- }
--
-- /*
-- * All done. Write the updated control block back to the caller.
-- */
-- ifc.ifc_len = total;
--
-- /*
-- * Both BSD and Solaris return 0 here, so we do too.
-- */
-- return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
--}
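--
--/* Editor's sketch, not part of the original source: calling SIOCGIFCONF
-- * from userspace against the handler above. Note that SIOCGIFCONF only
-- * reports interfaces with an IPv4 address configured, and the kernel
-- * rewrites ifc_len to the number of bytes actually written.
-- */
--#include <stdio.h>
--#include <string.h>
--#include <unistd.h>
--#include <sys/ioctl.h>
--#include <sys/socket.h>
--#include <net/if.h>
--
--int main(void)
--{
-- char buf[4096];
-- struct ifconf ifc;
-- int off;
-- int fd = socket(AF_INET, SOCK_DGRAM, 0);
--
-- if (fd < 0) {
-- perror("socket");
-- return 1;
-- }
-- ifc.ifc_len = sizeof(buf);
-- ifc.ifc_buf = buf;
-- if (ioctl(fd, SIOCGIFCONF, &ifc) < 0) {
-- perror("SIOCGIFCONF");
-- close(fd);
-- return 1;
-- }
-- for (off = 0; off < ifc.ifc_len; off += sizeof(struct ifreq)) {
-- struct ifreq *ifr = (struct ifreq *)(buf + off);
-- printf("%s\n", ifr->ifr_name);
-- }
-- close(fd);
-- return 0;
--}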
--
--#ifdef CONFIG_PROC_FS
--/*
-- * This is invoked by the /proc filesystem handler to display a device
-- * in detail.
-- */
--void *dev_seq_start(struct seq_file *seq, loff_t *pos)
-- __acquires(dev_base_lock)
--{
-- struct net *net = seq_file_net(seq);
-- loff_t off;
-- struct net_device *dev;
--
-- read_lock(&dev_base_lock);
-- if (!*pos)
-- return SEQ_START_TOKEN;
--
-- off = 1;
-- for_each_netdev(net, dev)
-- if (off++ == *pos)
-- return dev;
--
-- return NULL;
--}
--
--void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
--{
-- struct net *net = seq_file_net(seq);
-- ++*pos;
-- return v == SEQ_START_TOKEN ?
-- first_net_device(net) : next_net_device((struct net_device *)v);
--}
--
--void dev_seq_stop(struct seq_file *seq, void *v)
-- __releases(dev_base_lock)
--{
-- read_unlock(&dev_base_lock);
--}
--
--static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
--{
-- struct net_device_stats *stats = dev->get_stats(dev);
--
-- if (!nx_dev_visible(current->nx_info, dev))
-- return;
--
-- seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
-- "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
-- dev->name, stats->rx_bytes, stats->rx_packets,
-- stats->rx_errors,
-- stats->rx_dropped + stats->rx_missed_errors,
-- stats->rx_fifo_errors,
-- stats->rx_length_errors + stats->rx_over_errors +
-- stats->rx_crc_errors + stats->rx_frame_errors,
-- stats->rx_compressed, stats->multicast,
-- stats->tx_bytes, stats->tx_packets,
-- stats->tx_errors, stats->tx_dropped,
-- stats->tx_fifo_errors, stats->collisions,
-- stats->tx_carrier_errors +
-- stats->tx_aborted_errors +
-- stats->tx_window_errors +
-- stats->tx_heartbeat_errors,
-- stats->tx_compressed);
--}
--
--/*
-- * Called from the PROCfs module. This now uses the new arbitrary-sized
-- * /proc/net interface to create /proc/net/dev.
-- */
--static int dev_seq_show(struct seq_file *seq, void *v)
--{
-- if (v == SEQ_START_TOKEN)
-- seq_puts(seq, "Inter-| Receive "
-- " | Transmit\n"
-- " face |bytes packets errs drop fifo frame "
-- "compressed multicast|bytes packets errs "
-- "drop fifo colls carrier compressed\n");
-- else
-- dev_seq_printf_stats(seq, v);
-- return 0;
--}
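--
--/* Editor's sketch, not part of the original source: a userspace reader for
-- * the /proc/net/dev format produced by dev_seq_show() above -- two header
-- * lines, then "name: 8 rx fields, 8 tx fields" per interface. Only the
-- * rx/tx byte counters are extracted here.
-- */
--#include <stdio.h>
--
--int main(void)
--{
-- char line[512];
-- FILE *f = fopen("/proc/net/dev", "r");
--
-- if (!f) {
-- perror("/proc/net/dev");
-- return 1;
-- }
-- fgets(line, sizeof(line), f); /* skip the two header lines */
-- fgets(line, sizeof(line), f);
-- while (fgets(line, sizeof(line), f)) {
-- char name[32];
-- unsigned long rx_bytes, tx_bytes;
--
-- /* rx_bytes is field 1 after the colon; tx_bytes is field 9 */
-- if (sscanf(line, " %31[^:]: %lu %*u %*u %*u %*u %*u %*u %*u %lu",
-- name, &rx_bytes, &tx_bytes) == 3)
-- printf("%-8s rx=%lu tx=%lu\n", name, rx_bytes, tx_bytes);
-- }
-- fclose(f);
-- return 0;
--}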
--
--static struct netif_rx_stats *softnet_get_online(loff_t *pos)
--{
-- struct netif_rx_stats *rc = NULL;
--
-- while (*pos < nr_cpu_ids)
-- if (cpu_online(*pos)) {
-- rc = &per_cpu(netdev_rx_stat, *pos);
-- break;
-- } else
-- ++*pos;
-- return rc;
--}
--
--static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
--{
-- return softnet_get_online(pos);
--}
--
--static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
--{
-- ++*pos;
-- return softnet_get_online(pos);
--}
--
--static void softnet_seq_stop(struct seq_file *seq, void *v)
--{
--}
--
--static int softnet_seq_show(struct seq_file *seq, void *v)
--{
-- struct netif_rx_stats *s = v;
--
-- seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
-- s->total, s->dropped, s->time_squeeze, 0,
-- 0, 0, 0, 0, /* was fastroute */
-- s->cpu_collision );
-- return 0;
--}
--
--static const struct seq_operations dev_seq_ops = {
-- .start = dev_seq_start,
-- .next = dev_seq_next,
-- .stop = dev_seq_stop,
-- .show = dev_seq_show,
--};
--
--static int dev_seq_open(struct inode *inode, struct file *file)
--{
-- return seq_open_net(inode, file, &dev_seq_ops,
-- sizeof(struct seq_net_private));
--}
--
--static const struct file_operations dev_seq_fops = {
-- .owner = THIS_MODULE,
-- .open = dev_seq_open,
-- .read = seq_read,
-- .llseek = seq_lseek,
-- .release = seq_release_net,
--};
--
--static const struct seq_operations softnet_seq_ops = {
-- .start = softnet_seq_start,
-- .next = softnet_seq_next,
-- .stop = softnet_seq_stop,
-- .show = softnet_seq_show,
--};
--
--static int softnet_seq_open(struct inode *inode, struct file *file)
--{
-- return seq_open(file, &softnet_seq_ops);
--}
--
--static const struct file_operations softnet_seq_fops = {
-- .owner = THIS_MODULE,
-- .open = softnet_seq_open,
-- .read = seq_read,
-- .llseek = seq_lseek,
-- .release = seq_release,
--};
--
--static void *ptype_get_idx(loff_t pos)
--{
-- struct packet_type *pt = NULL;
-- loff_t i = 0;
-- int t;
--
-- list_for_each_entry_rcu(pt, &ptype_all, list) {
-- if (i == pos)
-- return pt;
-- ++i;
-- }
--
-- for (t = 0; t < PTYPE_HASH_SIZE; t++) {
-- list_for_each_entry_rcu(pt, &ptype_base[t], list) {
-- if (i == pos)
-- return pt;
-- ++i;
-- }
-- }
-- return NULL;
--}
--
--static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
-- __acquires(RCU)
--{
-- rcu_read_lock();
-- return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
--}
--
--static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
--{
-- struct packet_type *pt;
-- struct list_head *nxt;
-- int hash;
--
-- ++*pos;
-- if (v == SEQ_START_TOKEN)
-- return ptype_get_idx(0);
--
-- pt = v;
-- nxt = pt->list.next;
-- if (pt->type == htons(ETH_P_ALL)) {
-- if (nxt != &ptype_all)
-- goto found;
-- hash = 0;
-- nxt = ptype_base[0].next;
-- } else
-- hash = ntohs(pt->type) & PTYPE_HASH_MASK;
--
-- while (nxt == &ptype_base[hash]) {
-- if (++hash >= PTYPE_HASH_SIZE)
-- return NULL;
-- nxt = ptype_base[hash].next;
-- }
--found:
-- return list_entry(nxt, struct packet_type, list);
--}
--
--static void ptype_seq_stop(struct seq_file *seq, void *v)
-- __releases(RCU)
--{
-- rcu_read_unlock();
--}
--
--static void ptype_seq_decode(struct seq_file *seq, void *sym)
--{
--#ifdef CONFIG_KALLSYMS
-- unsigned long offset = 0, symsize;
-- const char *symname;
-- char *modname;
-- char namebuf[128];
--
-- symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
-- &modname, namebuf);
--
-- if (symname) {
-- char *delim = ":";
--
-- if (!modname)
-- modname = delim = "";
-- seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
-- symname, offset);
-- return;
-- }
--#endif
--
-- seq_printf(seq, "[%p]", sym);
--}
--
--static int ptype_seq_show(struct seq_file *seq, void *v)
--{
-- struct packet_type *pt = v;
--
-- if (v == SEQ_START_TOKEN)
-- seq_puts(seq, "Type Device Function\n");
-- else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
-- if (pt->type == htons(ETH_P_ALL))
-- seq_puts(seq, "ALL ");
-- else
-- seq_printf(seq, "%04x", ntohs(pt->type));
--
-- seq_printf(seq, " %-8s ",
-- pt->dev ? pt->dev->name : "");
-- ptype_seq_decode(seq, pt->func);
-- seq_putc(seq, '\n');
-- }
--
-- return 0;
--}
--
--static const struct seq_operations ptype_seq_ops = {
-- .start = ptype_seq_start,
-- .next = ptype_seq_next,
-- .stop = ptype_seq_stop,
-- .show = ptype_seq_show,
--};
--
--static int ptype_seq_open(struct inode *inode, struct file *file)
--{
-- return seq_open_net(inode, file, &ptype_seq_ops,
-- sizeof(struct seq_net_private));
--}
--
--static const struct file_operations ptype_seq_fops = {
-- .owner = THIS_MODULE,
-- .open = ptype_seq_open,
-- .read = seq_read,
-- .llseek = seq_lseek,
-- .release = seq_release_net,
--};
--
--
--static int __net_init dev_proc_net_init(struct net *net)
--{
-- int rc = -ENOMEM;
--
-- if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
-- goto out;
-- if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
-- goto out_dev;
-- if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
-- goto out_softnet;
--
-- if (wext_proc_init(net))
-- goto out_ptype;
-- rc = 0;
--out:
-- return rc;
--out_ptype:
-- proc_net_remove(net, "ptype");
--out_softnet:
-- proc_net_remove(net, "softnet_stat");
--out_dev:
-- proc_net_remove(net, "dev");
-- goto out;
--}
--
--static void __net_exit dev_proc_net_exit(struct net *net)
--{
-- wext_proc_exit(net);
--
-- proc_net_remove(net, "ptype");
-- proc_net_remove(net, "softnet_stat");
-- proc_net_remove(net, "dev");
--}
--
--static struct pernet_operations __net_initdata dev_proc_ops = {
-- .init = dev_proc_net_init,
-- .exit = dev_proc_net_exit,
--};
--
--static int __init dev_proc_init(void)
--{
-- return register_pernet_subsys(&dev_proc_ops);
--}
--#else
--#define dev_proc_init() 0
--#endif /* CONFIG_PROC_FS */
--
--
--/**
-- * netdev_set_master - set up master/slave pair
-- * @slave: slave device
-- * @master: new master device
-- *
-- * Changes the master device of the slave. Pass %NULL to break the
-- * bonding. The caller must hold the RTNL semaphore. On a failure
-- * a negative errno code is returned. On success the reference counts
-- * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
-- * function returns zero.
-- */
--int netdev_set_master(struct net_device *slave, struct net_device *master)
--{
-- struct net_device *old = slave->master;
--
-- ASSERT_RTNL();
--
-- if (master) {
-- if (old)
-- return -EBUSY;
-- dev_hold(master);
-- }
--
-- slave->master = master;
--
-- synchronize_net();
--
-- if (old)
-- dev_put(old);
--
-- if (master)
-- slave->flags |= IFF_SLAVE;
-- else
-- slave->flags &= ~IFF_SLAVE;
--
-- rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
-- return 0;
--}
--
--static void dev_change_rx_flags(struct net_device *dev, int flags)
--{
-- if (dev->flags & IFF_UP && dev->change_rx_flags)
-- dev->change_rx_flags(dev, flags);
--}
--
--static int __dev_set_promiscuity(struct net_device *dev, int inc)
--{
-- unsigned short old_flags = dev->flags;
--
-- ASSERT_RTNL();
--
-- dev->flags |= IFF_PROMISC;
-- dev->promiscuity += inc;
-- if (dev->promiscuity == 0) {
-- /*
-- * Avoid overflow.
-- * If inc causes overflow, untouch promisc and return error.
-- */
-- if (inc < 0)
-- dev->flags &= ~IFF_PROMISC;
-- else {
-- dev->promiscuity -= inc;
-- printk(KERN_WARNING "%s: promiscuity counter overflowed, "
-- "set promiscuity failed; the promiscuity feature "
-- "of the device might be broken.\n", dev->name);
-- return -EOVERFLOW;
-- }
-- }
-- if (dev->flags != old_flags) {
-- printk(KERN_INFO "device %s %s promiscuous mode\n",
-- dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
-- "left");
-- if (audit_enabled)
-- audit_log(current->audit_context, GFP_ATOMIC,
-- AUDIT_ANOM_PROMISCUOUS,
-- "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
-- dev->name, (dev->flags & IFF_PROMISC),
-- (old_flags & IFF_PROMISC),
-- audit_get_loginuid(current),
-- current->uid, current->gid,
-- audit_get_sessionid(current));
--
-- dev_change_rx_flags(dev, IFF_PROMISC);
-- }
-- return 0;
--}
--
--/**
-- * dev_set_promiscuity - update promiscuity count on a device
-- * @dev: device
-- * @inc: modifier
-- *
-- * Add or remove promiscuity from a device. While the count in the device
-- * remains above zero the interface remains promiscuous. Once it hits zero
-- * the device reverts to normal filtering operation. A negative inc
-- * value is used to drop promiscuity on the device.
-- * Return 0 if successful or a negative errno code on error.
-- */
--int dev_set_promiscuity(struct net_device *dev, int inc)
--{
-- unsigned short old_flags = dev->flags;
-- int err;
--
-- err = __dev_set_promiscuity(dev, inc);
-- if (err < 0)
-- return err;
-- if (dev->flags != old_flags)
-- dev_set_rx_mode(dev);
-- return err;
--}
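--
--/* Editor's sketch, not part of the original source: from userspace, the
-- * promiscuity counter above is driven through the IFF_PROMISC flag via
-- * SIOCGIFFLAGS/SIOCSIFFLAGS. "eth0" is a hypothetical interface name and
-- * the SIOCSIFFLAGS call needs CAP_NET_ADMIN.
-- */
--#include <stdio.h>
--#include <string.h>
--#include <unistd.h>
--#include <sys/ioctl.h>
--#include <sys/socket.h>
--#include <net/if.h>
--
--int main(void)
--{
-- struct ifreq ifr;
-- int fd = socket(AF_INET, SOCK_DGRAM, 0);
--
-- if (fd < 0) {
-- perror("socket");
-- return 1;
-- }
-- memset(&ifr, 0, sizeof(ifr));
-- strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
-- if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0) {
-- perror("SIOCGIFFLAGS");
-- close(fd);
-- return 1;
-- }
-- ifr.ifr_flags |= IFF_PROMISC; /* dev_change_flags() bumps the count */
-- if (ioctl(fd, SIOCSIFFLAGS, &ifr) < 0)
-- perror("SIOCSIFFLAGS");
-- close(fd);
-- return 0;
--}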
--
--/**
-- * dev_set_allmulti - update allmulti count on a device
-- * @dev: device
-- * @inc: modifier
-- *
-- * Add or remove reception of all multicast frames to a device. While the
-- * count in the device remains above zero the interface remains listening
-- * to all multicast frames. Once it hits zero the device reverts to normal
-- * filtering operation. A negative @inc value is used to drop the counter
-- * when releasing a resource needing all multicasts.
-- * Return 0 if successful or a negative errno code on error.
-- */
--
--int dev_set_allmulti(struct net_device *dev, int inc)
--{
-- unsigned short old_flags = dev->flags;
--
-- ASSERT_RTNL();
--
-- dev->flags |= IFF_ALLMULTI;
-- dev->allmulti += inc;
-- if (dev->allmulti == 0) {
-- /*
-- * Avoid overflow.
-- * If inc causes overflow, untouch allmulti and return error.
-- */
-- if (inc < 0)
-- dev->flags &= ~IFF_ALLMULTI;
-- else {
-- dev->allmulti -= inc;
-- printk(KERN_WARNING "%s: allmulti counter overflowed, "
-- "set allmulti failed; the allmulti feature of the "
-- "device might be broken.\n", dev->name);
-- return -EOVERFLOW;
-- }
-- }
-- if (dev->flags ^ old_flags) {
-- dev_change_rx_flags(dev, IFF_ALLMULTI);
-- dev_set_rx_mode(dev);
-- }
-- return 0;
--}
--
--/*
-- * Upload unicast and multicast address lists to device and
-- * configure RX filtering. When the device doesn't support unicast
-- * filtering it is put in promiscuous mode while unicast addresses
-- * are present.
-- */
--void __dev_set_rx_mode(struct net_device *dev)
--{
-- /* dev_open will call this function so the list will stay sane. */
-- if (!(dev->flags&IFF_UP))
-- return;
--
-- if (!netif_device_present(dev))
-- return;
--
-- if (dev->set_rx_mode)
-- dev->set_rx_mode(dev);
-- else {
-- /* Unicast address changes may only happen under the rtnl,
-- * therefore calling __dev_set_promiscuity here is safe.
-- */
-- if (dev->uc_count > 0 && !dev->uc_promisc) {
-- __dev_set_promiscuity(dev, 1);
-- dev->uc_promisc = 1;
-- } else if (dev->uc_count == 0 && dev->uc_promisc) {
-- __dev_set_promiscuity(dev, -1);
-- dev->uc_promisc = 0;
-- }
--
-- if (dev->set_multicast_list)
-- dev->set_multicast_list(dev);
-- }
--}
--
--void dev_set_rx_mode(struct net_device *dev)
--{
-- netif_addr_lock_bh(dev);
-- __dev_set_rx_mode(dev);
-- netif_addr_unlock_bh(dev);
--}
--
--int __dev_addr_delete(struct dev_addr_list **list, int *count,
-- void *addr, int alen, int glbl)
--{
-- struct dev_addr_list *da;
--
-- for (; (da = *list) != NULL; list = &da->next) {
-- if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
-- alen == da->da_addrlen) {
-- if (glbl) {
-- int old_glbl = da->da_gusers;
-- da->da_gusers = 0;
-- if (old_glbl == 0)
-- break;
-- }
-- if (--da->da_users)
-- return 0;
--
-- *list = da->next;
-- kfree(da);
-- (*count)--;
-- return 0;
-- }
-- }
-- return -ENOENT;
--}
--
--int __dev_addr_add(struct dev_addr_list **list, int *count,
-- void *addr, int alen, int glbl)
--{
-- struct dev_addr_list *da;
--
-- for (da = *list; da != NULL; da = da->next) {
-- if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
-- da->da_addrlen == alen) {
-- if (glbl) {
-- int old_glbl = da->da_gusers;
-- da->da_gusers = 1;
-- if (old_glbl)
-- return 0;
-- }
-- da->da_users++;
-- return 0;
-- }
-- }
--
-- da = kzalloc(sizeof(*da), GFP_ATOMIC);
-- if (da == NULL)
-- return -ENOMEM;
-- memcpy(da->da_addr, addr, alen);
-- da->da_addrlen = alen;
-- da->da_users = 1;
-- da->da_gusers = glbl ? 1 : 0;
-- da->next = *list;
-- *list = da;
-- (*count)++;
-- return 0;
--}
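--
--/* Editor's sketch, not part of the original source: a minimal userspace
-- * model of the reference-counted address list implemented by
-- * __dev_addr_add()/__dev_addr_delete() above -- an address added twice
-- * must be deleted twice before it is unlinked. Names are illustrative.
-- */
--#include <stdlib.h>
--#include <string.h>
--
--struct addr_node {
-- struct addr_node *next;
-- int users;
-- int alen;
-- unsigned char addr[32];
--};
--
--static int addr_add(struct addr_node **list, const void *addr, int alen)
--{
-- struct addr_node *n;
--
-- for (n = *list; n; n = n->next)
-- if (n->alen == alen && !memcmp(n->addr, addr, alen)) {
-- n->users++; /* already present: just take a reference */
-- return 0;
-- }
-- n = calloc(1, sizeof(*n));
-- if (!n)
-- return -1;
-- memcpy(n->addr, addr, alen);
-- n->alen = alen;
-- n->users = 1;
-- n->next = *list;
-- *list = n;
-- return 0;
--}
--
--static int addr_del(struct addr_node **list, const void *addr, int alen)
--{
-- struct addr_node *n;
--
-- for (; (n = *list) != NULL; list = &n->next)
-- if (n->alen == alen && !memcmp(n->addr, addr, alen)) {
-- if (--n->users)
-- return 0; /* still referenced elsewhere */
-- *list = n->next;
-- free(n);
-- return 0;
-- }
-- return -1; /* not found */
--}
--
--int main(void)
--{
-- struct addr_node *list = NULL;
-- unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };
--
-- addr_add(&list, mac, 6);
-- addr_add(&list, mac, 6); /* users == 2 */
-- addr_del(&list, mac, 6); /* users == 1, still listed */
-- addr_del(&list, mac, 6); /* unlinked and freed */
-- return 0;
--}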
--
--/**
-- * dev_unicast_delete - Release secondary unicast address.
-- * @dev: device
-- * @addr: address to delete
-- * @alen: length of @addr
-- *
-- * Release reference to a secondary unicast address and remove it
-- * from the device if the reference count drops to zero.
-- *
-- * The caller must hold the rtnl_mutex.
-- */
--int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
--{
-- int err;
--
-- ASSERT_RTNL();
--
-- netif_addr_lock_bh(dev);
-- err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
-- if (!err)
-- __dev_set_rx_mode(dev);
-- netif_addr_unlock_bh(dev);
-- return err;
--}
--EXPORT_SYMBOL(dev_unicast_delete);
--
--/**
-- * dev_unicast_add - add a secondary unicast address
-- * @dev: device
-- * @addr: address to add
-- * @alen: length of @addr
-- *
-- * Add a secondary unicast address to the device or increase
-- * the reference count if it already exists.
-- *
-- * The caller must hold the rtnl_mutex.
-- */
--int dev_unicast_add(struct net_device *dev, void *addr, int alen)
--{
-- int err;
--
-- ASSERT_RTNL();
--
-- netif_addr_lock_bh(dev);
-- err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
-- if (!err)
-- __dev_set_rx_mode(dev);
-- netif_addr_unlock_bh(dev);
-- return err;
--}
--EXPORT_SYMBOL(dev_unicast_add);
--
--int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
-- struct dev_addr_list **from, int *from_count)
--{
-- struct dev_addr_list *da, *next;
-- int err = 0;
--
-- da = *from;
-- while (da != NULL) {
-- next = da->next;
-- if (!da->da_synced) {
-- err = __dev_addr_add(to, to_count,
-- da->da_addr, da->da_addrlen, 0);
-- if (err < 0)
-- break;
-- da->da_synced = 1;
-- da->da_users++;
-- } else if (da->da_users == 1) {
-- __dev_addr_delete(to, to_count,
-- da->da_addr, da->da_addrlen, 0);
-- __dev_addr_delete(from, from_count,
-- da->da_addr, da->da_addrlen, 0);
-- }
-- da = next;
-- }
-- return err;
--}
--
--void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
-- struct dev_addr_list **from, int *from_count)
--{
-- struct dev_addr_list *da, *next;
--
-- da = *from;
-- while (da != NULL) {
-- next = da->next;
-- if (da->da_synced) {
-- __dev_addr_delete(to, to_count,
-- da->da_addr, da->da_addrlen, 0);
-- da->da_synced = 0;
-- __dev_addr_delete(from, from_count,
-- da->da_addr, da->da_addrlen, 0);
-- }
-- da = next;
-- }
--}
--
--/**
-- * dev_unicast_sync - Synchronize device's unicast list to another device
-- * @to: destination device
-- * @from: source device
-- *
-- * Add newly added addresses to the destination device and release
-- * addresses that have no users left. The source device must be
-- * locked by netif_tx_lock_bh.
-- *
-- * This function is intended to be called from the dev->set_rx_mode
-- * function of layered software devices.
-- */
--int dev_unicast_sync(struct net_device *to, struct net_device *from)
--{
-- int err = 0;
--
-- netif_addr_lock_bh(to);
-- err = __dev_addr_sync(&to->uc_list, &to->uc_count,
-- &from->uc_list, &from->uc_count);
-- if (!err)
-- __dev_set_rx_mode(to);
-- netif_addr_unlock_bh(to);
-- return err;
--}
--EXPORT_SYMBOL(dev_unicast_sync);
--
--/**
-- * dev_unicast_unsync - Remove synchronized addresses from the destination device
-- * @to: destination device
-- * @from: source device
-- *
-- * Remove all addresses that were added to the destination device by
-- * dev_unicast_sync(). This function is intended to be called from the
-- * dev->stop function of layered software devices.
-- */
--void dev_unicast_unsync(struct net_device *to, struct net_device *from)
--{
-- netif_addr_lock_bh(from);
-- netif_addr_lock(to);
--
-- __dev_addr_unsync(&to->uc_list, &to->uc_count,
-- &from->uc_list, &from->uc_count);
-- __dev_set_rx_mode(to);
--
-- netif_addr_unlock(to);
-- netif_addr_unlock_bh(from);
--}
--EXPORT_SYMBOL(dev_unicast_unsync);
--
--static void __dev_addr_discard(struct dev_addr_list **list)
--{
-- struct dev_addr_list *tmp;
--
-- while (*list != NULL) {
-- tmp = *list;
-- *list = tmp->next;
-- if (tmp->da_users > tmp->da_gusers)
-- printk("__dev_addr_discard: address leakage! "
-- "da_users=%d\n", tmp->da_users);
-- kfree(tmp);
-- }
--}
--
--static void dev_addr_discard(struct net_device *dev)
--{
-- netif_addr_lock_bh(dev);
--
-- __dev_addr_discard(&dev->uc_list);
-- dev->uc_count = 0;
--
-- __dev_addr_discard(&dev->mc_list);
-- dev->mc_count = 0;
--
-- netif_addr_unlock_bh(dev);
--}
--
--unsigned dev_get_flags(const struct net_device *dev)
--{
-- unsigned flags;
--
-- flags = (dev->flags & ~(IFF_PROMISC |
-- IFF_ALLMULTI |
-- IFF_RUNNING |
-- IFF_LOWER_UP |
-- IFF_DORMANT)) |
-- (dev->gflags & (IFF_PROMISC |
-- IFF_ALLMULTI));
--
-- if (netif_running(dev)) {
-- if (netif_oper_up(dev))
-- flags |= IFF_RUNNING;
-- if (netif_carrier_ok(dev))
-- flags |= IFF_LOWER_UP;
-- if (netif_dormant(dev))
-- flags |= IFF_DORMANT;
-- }
--
-- return flags;
--}
--
--int dev_change_flags(struct net_device *dev, unsigned flags)
--{
-- int ret, changes;
-- int old_flags = dev->flags;
--
-- ASSERT_RTNL();
--
-- /*
-- * Set the flags on our device.
-- */
--
-- dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
-- IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
-- IFF_AUTOMEDIA)) |
-- (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
-- IFF_ALLMULTI));
--
-- /*
-- * Load in the correct multicast list now the flags have changed.
-- */
--
-- if ((old_flags ^ flags) & IFF_MULTICAST)
-- dev_change_rx_flags(dev, IFF_MULTICAST);
--
-- dev_set_rx_mode(dev);
--
-- /*
-- * Have we downed the interface? We handle IFF_UP ourselves
-- * according to user attempts to set it, rather than blindly
-- * setting it.
-- */
--
-- ret = 0;
-- if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
-- ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
--
-- if (!ret)
-- dev_set_rx_mode(dev);
-- }
--
-- if (dev->flags & IFF_UP &&
-- ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
-- IFF_VOLATILE)))
-- call_netdevice_notifiers(NETDEV_CHANGE, dev);
--
-- if ((flags ^ dev->gflags) & IFF_PROMISC) {
-- int inc = (flags & IFF_PROMISC) ? +1 : -1;
-- dev->gflags ^= IFF_PROMISC;
-- dev_set_promiscuity(dev, inc);
-- }
--
-- /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
-- is important. Some (broken) drivers set IFF_PROMISC when
-- IFF_ALLMULTI is requested, without asking us and without reporting it.
-- */
-- if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
-- int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
-- dev->gflags ^= IFF_ALLMULTI;
-- dev_set_allmulti(dev, inc);
-- }
--
-- /* Exclude state transition flags, already notified */
-- changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
-- if (changes)
-- rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
--
-- return ret;
--}
--
--int dev_set_mtu(struct net_device *dev, int new_mtu)
--{
-- int err;
--
-- if (new_mtu == dev->mtu)
-- return 0;
--
-- /* MTU must be positive. */
-- if (new_mtu < 0)
-- return -EINVAL;
--
-- if (!netif_device_present(dev))
-- return -ENODEV;
--
-- err = 0;
-- if (dev->change_mtu)
-- err = dev->change_mtu(dev, new_mtu);
-- else
-- dev->mtu = new_mtu;
-- if (!err && dev->flags & IFF_UP)
-- call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
-- return err;
--}
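--
--/* Editor's sketch, not part of the original source: dev_set_mtu() above is
-- * what a userspace SIOCSIFMTU ultimately reaches. "eth0" is a hypothetical
-- * name; the set needs CAP_NET_ADMIN and will typically fail with EPERM
-- * otherwise.
-- */
--#include <stdio.h>
--#include <string.h>
--#include <unistd.h>
--#include <sys/ioctl.h>
--#include <sys/socket.h>
--#include <net/if.h>
--
--int main(void)
--{
-- struct ifreq ifr;
-- int fd = socket(AF_INET, SOCK_DGRAM, 0);
--
-- if (fd < 0) {
-- perror("socket");
-- return 1;
-- }
-- memset(&ifr, 0, sizeof(ifr));
-- strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
-- if (ioctl(fd, SIOCGIFMTU, &ifr) < 0) {
-- perror("SIOCGIFMTU");
-- close(fd);
-- return 1;
-- }
-- printf("current MTU: %d\n", ifr.ifr_mtu);
-- ifr.ifr_mtu = 1400;
-- if (ioctl(fd, SIOCSIFMTU, &ifr) < 0)
-- perror("SIOCSIFMTU");
-- close(fd);
-- return 0;
--}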
--
--int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
--{
-- int err;
--
-- if (!dev->set_mac_address)
-- return -EOPNOTSUPP;
-- if (sa->sa_family != dev->type)
-- return -EINVAL;
-- if (!netif_device_present(dev))
-- return -ENODEV;
-- err = dev->set_mac_address(dev, sa);
-- if (!err)
-- call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
-- return err;
--}
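--
--/* Editor's sketch, not part of the original source: setting a locally
-- * administered MAC from userspace via SIOCSIFHWADDR, which lands in
-- * dev_set_mac_address() above. Note sa_family must match dev->type
-- * (ARPHRD_ETHER here). "eth0" is hypothetical; needs CAP_NET_ADMIN.
-- */
--#include <stdio.h>
--#include <string.h>
--#include <unistd.h>
--#include <sys/ioctl.h>
--#include <sys/socket.h>
--#include <net/if.h>
--#include <net/if_arp.h>
--
--int main(void)
--{
-- struct ifreq ifr;
-- unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
-- int fd = socket(AF_INET, SOCK_DGRAM, 0);
--
-- if (fd < 0) {
-- perror("socket");
-- return 1;
-- }
-- memset(&ifr, 0, sizeof(ifr));
-- strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
-- ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
-- memcpy(ifr.ifr_hwaddr.sa_data, mac, 6);
-- if (ioctl(fd, SIOCSIFHWADDR, &ifr) < 0)
-- perror("SIOCSIFHWADDR");
-- close(fd);
-- return 0;
--}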
--
--/*
-- * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
-- */
--static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
--{
-- int err;
-- struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
--
-- if (!dev)
-- return -ENODEV;
--
-- switch (cmd) {
-- case SIOCGIFFLAGS: /* Get interface flags */
-- ifr->ifr_flags = dev_get_flags(dev);
-- return 0;
--
-- case SIOCGIFMETRIC: /* Get the metric on the interface
-- (currently unused) */
-- ifr->ifr_metric = 0;
-- return 0;
--
-- case SIOCGIFMTU: /* Get the MTU of a device */
-- ifr->ifr_mtu = dev->mtu;
-- return 0;
--
-- case SIOCGIFHWADDR:
-- if (!dev->addr_len)
-- memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
-- else
-- memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
-- min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
-- ifr->ifr_hwaddr.sa_family = dev->type;
-- return 0;
--
-- case SIOCGIFSLAVE:
-- err = -EINVAL;
-- break;
--
-- case SIOCGIFMAP:
-- ifr->ifr_map.mem_start = dev->mem_start;
-- ifr->ifr_map.mem_end = dev->mem_end;
-- ifr->ifr_map.base_addr = dev->base_addr;
-- ifr->ifr_map.irq = dev->irq;
-- ifr->ifr_map.dma = dev->dma;
-- ifr->ifr_map.port = dev->if_port;
-- return 0;
--
-- case SIOCGIFINDEX:
-- ifr->ifr_ifindex = dev->ifindex;
-- return 0;
--
-- case SIOCGIFTXQLEN:
-- ifr->ifr_qlen = dev->tx_queue_len;
-- return 0;
--
-- default:
-- /* dev_ioctl() should ensure this case
-- * is never reached
-- */
-- WARN_ON(1);
-- err = -EINVAL;
-- break;
--
-- }
-- return err;
--}
--
--/*
-- * Perform the SIOCxIFxxx calls, inside rtnl_lock()
-- */
--static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
--{
-- int err;
-- struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
--
-- if (!dev)
-- return -ENODEV;
--
-- switch (cmd) {
-- case SIOCSIFFLAGS: /* Set interface flags */
-- return dev_change_flags(dev, ifr->ifr_flags);
--
-- case SIOCSIFMETRIC: /* Set the metric on the interface
-- (currently unused) */
-- return -EOPNOTSUPP;
--
-- case SIOCSIFMTU: /* Set the MTU of a device */
-- return dev_set_mtu(dev, ifr->ifr_mtu);
--
-- case SIOCSIFHWADDR:
-- return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
--
-- case SIOCSIFHWBROADCAST:
-- if (ifr->ifr_hwaddr.sa_family != dev->type)
-- return -EINVAL;
-- memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
-- min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
-- call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
-- return 0;
--
-- case SIOCSIFMAP:
-- if (dev->set_config) {
-- if (!netif_device_present(dev))
-- return -ENODEV;
-- return dev->set_config(dev, &ifr->ifr_map);
-- }
-- return -EOPNOTSUPP;
--
-- case SIOCADDMULTI:
-- if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
-- ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
-- return -EINVAL;
-- if (!netif_device_present(dev))
-- return -ENODEV;
-- return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
-- dev->addr_len, 1);
--
-- case SIOCDELMULTI:
-- if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
-- ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
-- return -EINVAL;
-- if (!netif_device_present(dev))
-- return -ENODEV;
-- return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
-- dev->addr_len, 1);
--
-- case SIOCSIFTXQLEN:
-- if (ifr->ifr_qlen < 0)
-- return -EINVAL;
-- dev->tx_queue_len = ifr->ifr_qlen;
-- return 0;
--
-- case SIOCSIFNAME:
-- ifr->ifr_newname[IFNAMSIZ-1] = '\0';
-- return dev_change_name(dev, ifr->ifr_newname);
--
-- /*
-- * Unknown or private ioctl
-- */
--
-- default:
-- if ((cmd >= SIOCDEVPRIVATE &&
-- cmd <= SIOCDEVPRIVATE + 15) ||
-- cmd == SIOCBONDENSLAVE ||
-- cmd == SIOCBONDRELEASE ||
-- cmd == SIOCBONDSETHWADDR ||
-- cmd == SIOCBONDSLAVEINFOQUERY ||
-- cmd == SIOCBONDINFOQUERY ||
-- cmd == SIOCBONDCHANGEACTIVE ||
-- cmd == SIOCGMIIPHY ||
-- cmd == SIOCGMIIREG ||
-- cmd == SIOCSMIIREG ||
-- cmd == SIOCBRADDIF ||
-- cmd == SIOCBRDELIF ||
-- cmd == SIOCWANDEV) {
-- err = -EOPNOTSUPP;
-- if (dev->do_ioctl) {
-- if (netif_device_present(dev))
-- err = dev->do_ioctl(dev, ifr,
-- cmd);
-- else
-- err = -ENODEV;
-- }
-- } else
-- err = -EINVAL;
--
-- }
-- return err;
--}
--
--/*
-- * This function handles all "interface"-type I/O control requests. The actual
-- * 'doing' part of this is dev_ifsioc above.
-- */
--
--/**
-- * dev_ioctl - network device ioctl
-- * @net: the applicable net namespace
-- * @cmd: command to issue
-- * @arg: pointer to a struct ifreq in user space
-- *
-- * Issue ioctl functions to devices. This is normally called by the
-- * user space syscall interfaces but can sometimes be useful for
-- * other purposes. The return value is the return from the syscall if
-- * positive or a negative errno code on error.
-- */
--
--int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
--{
-- struct ifreq ifr;
-- int ret;
-- char *colon;
--
-- /* One special case: SIOCGIFCONF takes an ifconf argument
-- and requires a shared lock, because it sleeps while writing
-- to user space.
-- */
--
-- if (cmd == SIOCGIFCONF) {
-- rtnl_lock();
-- ret = dev_ifconf(net, (char __user *) arg);
-- rtnl_unlock();
-- return ret;
-- }
-- if (cmd == SIOCGIFNAME)
-- return dev_ifname(net, (struct ifreq __user *)arg);
--
-- if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
-- return -EFAULT;
--
-- ifr.ifr_name[IFNAMSIZ-1] = 0;
--
-- colon = strchr(ifr.ifr_name, ':');
-- if (colon)
-- *colon = 0;
--
-- /*
-- * See which interface the caller is talking about.
-- */
--
-- switch (cmd) {
-- /*
-- * These ioctl calls:
-- * - can be done by all.
-- * - atomic and do not require locking.
-- * - return a value
-- */
-- case SIOCGIFFLAGS:
-- case SIOCGIFMETRIC:
-- case SIOCGIFMTU:
-- case SIOCGIFHWADDR:
-- case SIOCGIFSLAVE:
-- case SIOCGIFMAP:
-- case SIOCGIFINDEX:
-- case SIOCGIFTXQLEN:
-- dev_load(net, ifr.ifr_name);
-- read_lock(&dev_base_lock);
-- ret = dev_ifsioc_locked(net, &ifr, cmd);
-- read_unlock(&dev_base_lock);
-- if (!ret) {
-- if (colon)
-- *colon = ':';
-- if (copy_to_user(arg, &ifr,
-- sizeof(struct ifreq)))
-- ret = -EFAULT;
-- }
-- return ret;
--
-- case SIOCETHTOOL:
-- dev_load(net, ifr.ifr_name);
-- rtnl_lock();
-- ret = dev_ethtool(net, &ifr);
-- rtnl_unlock();
-- if (!ret) {
-- if (colon)
-- *colon = ':';
-- if (copy_to_user(arg, &ifr,
-- sizeof(struct ifreq)))
-- ret = -EFAULT;
-- }
-- return ret;
--
-- /*
-- * These ioctl calls:
-- * - require superuser power.
-- * - require strict serialization.
-- * - return a value
-- */
-- case SIOCGMIIPHY:
-- case SIOCGMIIREG:
-- case SIOCSIFNAME:
-- if (!capable(CAP_NET_ADMIN))
-- return -EPERM;
-- dev_load(net, ifr.ifr_name);
-- rtnl_lock();
-- ret = dev_ifsioc(net, &ifr, cmd);
-- rtnl_unlock();
-- if (!ret) {
-- if (colon)
-- *colon = ':';
-- if (copy_to_user(arg, &ifr,
-- sizeof(struct ifreq)))
-- ret = -EFAULT;
-- }
-- return ret;
--
-- /*
-- * These ioctl calls:
-- * - require superuser power.
-- * - require strict serialization.
-- * - do not return a value
-- */
-- case SIOCSIFFLAGS:
-- case SIOCSIFMETRIC:
-- case SIOCSIFMTU:
-- case SIOCSIFMAP:
-- case SIOCSIFHWADDR:
-- case SIOCSIFSLAVE:
-- case SIOCADDMULTI:
-- case SIOCDELMULTI:
-- case SIOCSIFHWBROADCAST:
-- case SIOCSIFTXQLEN:
-- case SIOCSMIIREG:
-- case SIOCBONDENSLAVE:
-- case SIOCBONDRELEASE:
-- case SIOCBONDSETHWADDR:
-- case SIOCBONDCHANGEACTIVE:
-- case SIOCBRADDIF:
-- case SIOCBRDELIF:
-- if (!capable(CAP_NET_ADMIN))
-- return -EPERM;
-- /* fall through */
-- case SIOCBONDSLAVEINFOQUERY:
-- case SIOCBONDINFOQUERY:
-- dev_load(net, ifr.ifr_name);
-- rtnl_lock();
-- ret = dev_ifsioc(net, &ifr, cmd);
-- rtnl_unlock();
-- return ret;
--
-- case SIOCGIFMEM:
-- /* Get the per device memory space. We can add this but
-- * currently do not support it */
-- case SIOCSIFMEM:
-- /* Set the per device memory buffer space.
-- * Not applicable in our case */
-- case SIOCSIFLINK:
-- return -EINVAL;
--
-- /*
-- * Unknown or private ioctl.
-- */
-- default:
-- if (cmd == SIOCWANDEV ||
-- (cmd >= SIOCDEVPRIVATE &&
-- cmd <= SIOCDEVPRIVATE + 15)) {
-- dev_load(net, ifr.ifr_name);
-- rtnl_lock();
-- ret = dev_ifsioc(net, &ifr, cmd);
-- rtnl_unlock();
-- if (!ret && copy_to_user(arg, &ifr,
-- sizeof(struct ifreq)))
-- ret = -EFAULT;
-- return ret;
-- }
-- /* Take care of Wireless Extensions */
-- if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
-- return wext_handle_ioctl(net, &ifr, cmd, arg);
-- return -EINVAL;
-- }
--}
--
--
--/**
-- * dev_new_index - allocate an ifindex
-- * @net: the applicable net namespace
-- *
-- * Returns a suitable unique value for a new device interface
-- * number. The caller must hold the rtnl semaphore or the
-- * dev_base_lock to be sure it remains unique.
-- */
--static int dev_new_index(struct net *net)
--{
-- static int ifindex;
-- for (;;) {
-- if (++ifindex <= 0)
-- ifindex = 1;
-- if (!__dev_get_by_index(net, ifindex))
-- return ifindex;
-- }
--}
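--
--/* Editor's sketch, not part of the original source: a standalone model of
-- * dev_new_index()'s wraparound behaviour -- the counter skips zero and
-- * negative values after overflowing, and probes until a free index is
-- * found. in_use() stands in for __dev_get_by_index().
-- */
--#include <stdio.h>
--
--static int in_use(int ifindex)
--{
-- return ifindex < 4; /* pretend indexes 1..3 are taken */
--}
--
--static int new_index(void)
--{
-- static int ifindex;
--
-- for (;;) {
-- if (++ifindex <= 0) /* wrap back to 1, never hand out <= 0 */
-- ifindex = 1;
-- if (!in_use(ifindex))
-- return ifindex;
-- }
--}
--
--int main(void)
--{
-- printf("first free ifindex: %d\n", new_index());
-- return 0;
--}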
--
--/* Delayed registration/unregisteration */
--static LIST_HEAD(net_todo_list);
--
--static void net_set_todo(struct net_device *dev)
--{
-- list_add_tail(&dev->todo_list, &net_todo_list);
--}
--
--static void rollback_registered(struct net_device *dev)
--{
-- BUG_ON(dev_boot_phase);
-- ASSERT_RTNL();
--
-- /* Some devices call this without ever having registered, as part of initialization unwind. */
-- if (dev->reg_state == NETREG_UNINITIALIZED) {
-- printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
-- "was registered\n", dev->name, dev);
--
-- WARN_ON(1);
-- return;
-- }
--
-- BUG_ON(dev->reg_state != NETREG_REGISTERED);
--
-- /* If device is running, close it first. */
-- dev_close(dev);
--
-- /* And unlink it from device chain. */
-- unlist_netdevice(dev);
--
-- dev->reg_state = NETREG_UNREGISTERING;
--
-- synchronize_net();
--
-- /* Shutdown queueing discipline. */
-- dev_shutdown(dev);
--
--
-- /* Notify protocols that we are about to destroy
-- this device. They should clean up all their state.
-- */
-- call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
--
-- /*
-- * Flush the unicast and multicast chains
-- */
-- dev_addr_discard(dev);
--
-- if (dev->uninit)
-- dev->uninit(dev);
--
-- /* Notifier chain MUST detach us from master device. */
-- WARN_ON(dev->master);
--
-- /* Remove entries from kobject tree */
-- netdev_unregister_kobject(dev);
--
-- synchronize_net();
--
-- dev_put(dev);
--}
--
--static void __netdev_init_queue_locks_one(struct net_device *dev,
-- struct netdev_queue *dev_queue,
-- void *_unused)
--{
-- spin_lock_init(&dev_queue->_xmit_lock);
-- netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
-- dev_queue->xmit_lock_owner = -1;
--}
--
--static void netdev_init_queue_locks(struct net_device *dev)
--{
-- netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
-- __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
--}
--
--/**
-- * register_netdevice - register a network device
-- * @dev: device to register
-- *
-- * Take a completed network device structure and add it to the kernel
-- * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
-- * chain. 0 is returned on success. A negative errno code is returned
-- * on a failure to set up the device, or if the name is a duplicate.
-- *
-- * Callers must hold the rtnl semaphore. You may want
-- * register_netdev() instead of this.
-- *
-- * BUGS:
-- * The locking appears insufficient to guarantee two parallel registers
-- * will not get the same name.
-- */
--
--int register_netdevice(struct net_device *dev)
--{
-- struct hlist_head *head;
-- struct hlist_node *p;
-- int ret;
-- struct net *net;
--
-- BUG_ON(dev_boot_phase);
-- ASSERT_RTNL();
--
-- might_sleep();
--
-- /* When net_devices are persistent, this will be fatal. */
-- BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
-- BUG_ON(!dev_net(dev));
-- net = dev_net(dev);
--
-- spin_lock_init(&dev->addr_list_lock);
-- netdev_set_addr_lockdep_class(dev);
-- netdev_init_queue_locks(dev);
--
-- dev->iflink = -1;
--
-- /* Init, if this function is available */
-- if (dev->init) {
-- ret = dev->init(dev);
-- if (ret) {
-- if (ret > 0)
-- ret = -EIO;
-- goto out;
-- }
-- }
--
-- if (!dev_valid_name(dev->name)) {
-- ret = -EINVAL;
-- goto err_uninit;
-- }
--
-- dev->ifindex = dev_new_index(net);
-- if (dev->iflink == -1)
-- dev->iflink = dev->ifindex;
--
-- /* Check for existence of name */
-- head = dev_name_hash(net, dev->name);
-- hlist_for_each(p, head) {
-- struct net_device *d
-- = hlist_entry(p, struct net_device, name_hlist);
-- if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
-- ret = -EEXIST;
-- goto err_uninit;
-- }
-- }
--
-- /* Fix illegal checksum combinations */
-- if ((dev->features & NETIF_F_HW_CSUM) &&
-- (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-- printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
-- dev->name);
-- dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
-- }
--
-- if ((dev->features & NETIF_F_NO_CSUM) &&
-- (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-- printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
-- dev->name);
-- dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
-- }
--
--
-- /* Fix illegal SG+CSUM combinations. */
-- if ((dev->features & NETIF_F_SG) &&
-- !(dev->features & NETIF_F_ALL_CSUM)) {
-- printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
-- dev->name);
-- dev->features &= ~NETIF_F_SG;
-- }
--
-- /* TSO requires that SG is present as well. */
-- if ((dev->features & NETIF_F_TSO) &&
-- !(dev->features & NETIF_F_SG)) {
-- printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
-- dev->name);
-- dev->features &= ~NETIF_F_TSO;
-- }
-- if (dev->features & NETIF_F_UFO) {
-- if (!(dev->features & NETIF_F_GEN_CSUM)) {
-- printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
-- "NETIF_F_HW_CSUM feature.\n",
-- dev->name);
-- dev->features &= ~NETIF_F_UFO;
-- }
-- if (!(dev->features & NETIF_F_SG)) {
-- printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
-- "NETIF_F_SG feature.\n",
-- dev->name);
-- dev->features &= ~NETIF_F_UFO;
-- }
-- }
--
-- /* Enable software GSO if SG is supported. */
-- if (dev->features & NETIF_F_SG)
-- dev->features |= NETIF_F_GSO;
--
-- netdev_initialize_kobject(dev);
-- ret = netdev_register_kobject(dev);
-- if (ret)
-- goto err_uninit;
-- dev->reg_state = NETREG_REGISTERED;
--
-- /*
-- * Default initial state at registry is that the
-- * device is present.
-- */
--
-- set_bit(__LINK_STATE_PRESENT, &dev->state);
--
-- dev_init_scheduler(dev);
-- dev_hold(dev);
-- list_netdevice(dev);
--
-- /* Notify protocols that a new device has appeared. */
-- ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
-- ret = notifier_to_errno(ret);
-- if (ret) {
-- rollback_registered(dev);
-- dev->reg_state = NETREG_UNREGISTERED;
-- }
--
--out:
-- return ret;
--
--err_uninit:
-- if (dev->uninit)
-- dev->uninit(dev);
-- goto out;
--}
--
--/**
-- * register_netdev - register a network device
-- * @dev: device to register
-- *
-- * Take a completed network device structure and add it to the kernel
-- * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
-- * chain. 0 is returned on success. A negative errno code is returned
-- * on a failure to set up the device, or if the name is a duplicate.
-- *
-- * This is a wrapper around register_netdevice that takes the rtnl semaphore
-- * and expands the device name if you passed a format string to
-- * alloc_netdev.
-- */
--int register_netdev(struct net_device *dev)
--{
-- int err;
--
-- rtnl_lock();
--
-- /*
-- * If the name is a format string the caller wants us to do a
-- * name allocation.
-- */
-- if (strchr(dev->name, '%')) {
-- err = dev_alloc_name(dev, dev->name);
-- if (err < 0)
-- goto out;
-- }
--
-- err = register_netdevice(dev);
--out:
-- rtnl_unlock();
-- return err;
--}
--EXPORT_SYMBOL(register_netdev);
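
The removed register_netdev() above is the usual driver entry point. A minimal usage sketch, assuming a hypothetical driver "mydrv" with an Ethernet-style device; alloc_netdev(), ether_setup() and the "eth%d" format-string expansion are the stock 2.6.27 APIs described in the comment above:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct mydrv_priv {
	int link_up;			/* hypothetical driver state */
};

static int __init mydrv_init(void)
{
	struct net_device *dev;
	int err;

	/* "eth%d" is a format string: register_netdev() expands it
	 * via dev_alloc_name() before calling register_netdevice(). */
	dev = alloc_netdev(sizeof(struct mydrv_priv), "eth%d", ether_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);	/* takes and drops the RTNL itself */
	if (err)
		free_netdev(dev);
	return err;
}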
--
--/*
-- * netdev_wait_allrefs - wait until all references are gone.
-- *
-- * This is called when unregistering network devices.
-- *
-- * Any protocol or device that holds a reference should register
-- * for netdevice notification, and cleanup and put back the
-- * reference if they receive an UNREGISTER event.
-- * We can get stuck here if buggy protocols don't correctly
-- * call dev_put.
-- */
--static void netdev_wait_allrefs(struct net_device *dev)
--{
-- unsigned long rebroadcast_time, warning_time;
--
-- rebroadcast_time = warning_time = jiffies;
-- while (atomic_read(&dev->refcnt) != 0) {
-- if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
-- rtnl_lock();
--
-- /* Rebroadcast unregister notification */
-- call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
--
-- if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
-- &dev->state)) {
-- /* We must not have linkwatch events
-- * pending on unregister. If this
-- * happens, we simply run the queue
-- * unscheduled, resulting in a noop
-- * for this device.
-- */
-- linkwatch_run_queue();
-- }
--
-- __rtnl_unlock();
--
-- rebroadcast_time = jiffies;
-- }
--
-- msleep(250);
--
-- if (time_after(jiffies, warning_time + 10 * HZ)) {
-- printk(KERN_EMERG "unregister_netdevice: "
-- "waiting for %s to become free. Usage "
-- "count = %d\n",
-- dev->name, atomic_read(&dev->refcnt));
-- warning_time = jiffies;
-- }
-- }
--}
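
The comment above obliges reference holders to react to NETDEV_UNREGISTER, or netdev_wait_allrefs() spins forever. A sketch of that discipline for a hypothetical protocol that caches a device pointer (names are illustrative, not from this patch):

#include <linux/netdevice.h>
#include <linux/notifier.h>

static struct net_device *cached_dev;	/* held with dev_hold() elsewhere */

static int myproto_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* 2.6.27 passes the device directly */

	if (event == NETDEV_UNREGISTER && dev == cached_dev) {
		/* Drop our reference so netdev_wait_allrefs() can finish. */
		dev_put(cached_dev);
		cached_dev = NULL;
	}
	return NOTIFY_DONE;
}

static struct notifier_block myproto_notifier = {
	.notifier_call = myproto_netdev_event,
};
/* registered once at init time with
 * register_netdevice_notifier(&myproto_notifier); */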
--
--/* The sequence is:
-- *
-- * rtnl_lock();
-- * ...
-- * register_netdevice(x1);
-- * register_netdevice(x2);
-- * ...
-- * unregister_netdevice(y1);
-- * unregister_netdevice(y2);
-- * ...
-- * rtnl_unlock();
-- * free_netdev(y1);
-- * free_netdev(y2);
-- *
-- * We are invoked by rtnl_unlock().
-- * This allows us to deal with problems:
-- * 1) We can delete sysfs objects which invoke hotplug
-- * without deadlocking with linkwatch via keventd.
-- * 2) Since we run with the RTNL semaphore not held, we can sleep
-- * safely in order to wait for the netdev refcnt to drop to zero.
-- *
-- * We must not return until all unregister events added during
-- * the interval the lock was held have been completed.
-- */
--void netdev_run_todo(void)
--{
-- struct list_head list;
--
-- /* Snapshot list, allow later requests */
-- list_replace_init(&net_todo_list, &list);
--
-- __rtnl_unlock();
--
-- while (!list_empty(&list)) {
-- struct net_device *dev
-- = list_entry(list.next, struct net_device, todo_list);
-- list_del(&dev->todo_list);
--
-- if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
-- printk(KERN_ERR "network todo '%s' but state %d\n",
-- dev->name, dev->reg_state);
-- dump_stack();
-- continue;
-- }
--
-- dev->reg_state = NETREG_UNREGISTERED;
--
-- on_each_cpu(flush_backlog, dev, 1);
--
-- netdev_wait_allrefs(dev);
--
-- /* paranoia */
-- BUG_ON(atomic_read(&dev->refcnt));
-- WARN_ON(dev->ip_ptr);
-- WARN_ON(dev->ip6_ptr);
-- WARN_ON(dev->dn_ptr);
--
-- if (dev->destructor)
-- dev->destructor(dev);
--
-- /* Free network device */
-- kobject_put(&dev->dev.kobj);
-- }
--}
--
--static struct net_device_stats *internal_stats(struct net_device *dev)
--{
-- return &dev->stats;
--}
--
--static void netdev_init_one_queue(struct net_device *dev,
-- struct netdev_queue *queue,
-- void *_unused)
--{
-- queue->dev = dev;
--}
--
--static void netdev_init_queues(struct net_device *dev)
--{
-- netdev_init_one_queue(dev, &dev->rx_queue, NULL);
-- netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
-- spin_lock_init(&dev->tx_global_lock);
--}
--
--/**
-- * alloc_netdev_mq - allocate network device
-- * @sizeof_priv: size of private data to allocate space for
-- * @name: device name format string
-- * @setup: callback to initialize device
-- * @queue_count: the number of subqueues to allocate
-- *
-- * Allocates a struct net_device with private data area for driver use
-- * and performs basic initialization. Also allocates subqueue structs
-- * for each queue on the device at the end of the netdevice.
-- */
--struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
-- void (*setup)(struct net_device *), unsigned int queue_count)
--{
-- struct netdev_queue *tx;
-- struct net_device *dev;
-- size_t alloc_size;
-- void *p;
--
-- BUG_ON(strlen(name) >= sizeof(dev->name));
--
-- alloc_size = sizeof(struct net_device);
-- if (sizeof_priv) {
-- /* ensure 32-byte alignment of private area */
-- alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
-- alloc_size += sizeof_priv;
-- }
-- /* ensure 32-byte alignment of whole construct */
-- alloc_size += NETDEV_ALIGN_CONST;
--
-- p = kzalloc(alloc_size, GFP_KERNEL);
-- if (!p) {
-- printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
-- return NULL;
-- }
--
-- tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
-- if (!tx) {
-- printk(KERN_ERR "alloc_netdev: Unable to allocate "
-- "tx qdiscs.\n");
-- kfree(p);
-- return NULL;
-- }
--
-- dev = (struct net_device *)
-- (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
-- dev->padded = (char *)dev - (char *)p;
-- dev_net_set(dev, &init_net);
--
-- dev->_tx = tx;
-- dev->num_tx_queues = queue_count;
-- dev->real_num_tx_queues = queue_count;
--
-- if (sizeof_priv) {
-- dev->priv = ((char *)dev +
-- ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
-- & ~NETDEV_ALIGN_CONST));
-- }
--
-- dev->gso_max_size = GSO_MAX_SIZE;
--
-- netdev_init_queues(dev);
--
-- dev->get_stats = internal_stats;
-- netpoll_netdev_init(dev);
-- setup(dev);
-- strcpy(dev->name, name);
-- return dev;
--}
--EXPORT_SYMBOL(alloc_netdev_mq);
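
A sketch of how a multiqueue driver would call the allocator above; MYDRV_NUM_TXQ and mydrv_priv are hypothetical, and netdev_priv() returns the aligned private area that alloc_netdev_mq() reserved:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#define MYDRV_NUM_TXQ	4		/* hypothetical TX queue count */

struct mydrv_priv {
	spinlock_t lock;		/* hypothetical per-device state */
};

static struct net_device *mydrv_alloc(void)
{
	struct net_device *dev;
	struct mydrv_priv *priv;

	/* kcalloc()s one struct netdev_queue per TX queue and places
	 * the private area past the 32-byte-aligned net_device. */
	dev = alloc_netdev_mq(sizeof(*priv), "myeth%d", ether_setup,
			      MYDRV_NUM_TXQ);
	if (!dev)
		return NULL;

	priv = netdev_priv(dev);
	spin_lock_init(&priv->lock);
	return dev;
}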
--
--/**
-- * free_netdev - free network device
-- * @dev: device
-- *
-- * This function does the last stage of destroying an allocated device
-- * interface. The reference to the device object is released.
-- * If this is the last reference then it will be freed.
-- */
--void free_netdev(struct net_device *dev)
--{
-- release_net(dev_net(dev));
--
-- kfree(dev->_tx);
--
-- /* Compatibility with error handling in drivers */
-- if (dev->reg_state == NETREG_UNINITIALIZED) {
-- kfree((char *)dev - dev->padded);
-- return;
-- }
--
-- BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
-- dev->reg_state = NETREG_RELEASED;
--
-- /* will free via device release */
-- put_device(&dev->dev);
--}
--
--/* Synchronize with packet receive processing. */
--void synchronize_net(void)
--{
-- might_sleep();
-- synchronize_rcu();
--}
--
--/**
-- * unregister_netdevice - remove device from the kernel
-- * @dev: device
-- *
-- * This function shuts down a device interface and removes it
-- * from the kernel tables.
-- *
-- * Callers must hold the rtnl semaphore. You may want
-- * unregister_netdev() instead of this.
-- */
--
--void unregister_netdevice(struct net_device *dev)
--{
-- ASSERT_RTNL();
--
-- rollback_registered(dev);
-- /* Finish processing unregister after unlock */
-- net_set_todo(dev);
--}
--
--/**
-- * unregister_netdev - remove device from the kernel
-- * @dev: device
-- *
-- * This function shuts down a device interface and removes it
-- * from the kernel tables.
-- *
-- * This is just a wrapper for unregister_netdevice that takes
-- * the rtnl semaphore. In general you want to use this and not
-- * unregister_netdevice.
-- */
--void unregister_netdev(struct net_device *dev)
--{
-- rtnl_lock();
-- unregister_netdevice(dev);
-- rtnl_unlock();
--}
--
--EXPORT_SYMBOL(unregister_netdev);
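
Taken together with the sequence comment before netdev_run_todo(), the teardown order a driver must follow is: unregister first, free only afterwards. A minimal sketch:

static void mydrv_remove(struct net_device *dev)
{
	/* Takes the RTNL, runs rollback_registered() and queues the
	 * device on net_todo_list; when the RTNL is dropped,
	 * netdev_run_todo() waits for the refcount to reach zero. */
	unregister_netdev(dev);

	/* Only now is it safe to release the memory. */
	free_netdev(dev);
}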
--
--/**
-- * dev_change_net_namespace - move device to a different network namespace
-- * @dev: device
-- * @net: network namespace
-- * @pat: If not NULL name pattern to try if the current device name
-- * is already taken in the destination network namespace.
-- *
-- * This function shuts down a device interface and moves it
-- * to a new network namespace. On success 0 is returned, on
-- * a failure a negative errno code is returned.
-- *
-- * Callers must hold the rtnl semaphore.
-- */
--
--int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
--{
-- char buf[IFNAMSIZ];
-- const char *destname;
-- int err;
--
-- ASSERT_RTNL();
--
-- /* Don't allow namespace local devices to be moved. */
-- err = -EINVAL;
-- if (dev->features & NETIF_F_NETNS_LOCAL)
-- goto out;
--
--#ifdef CONFIG_SYSFS
-- /* Don't allow real devices to be moved when sysfs
-- * is enabled.
-- */
-- err = -EINVAL;
-- if (dev->dev.parent)
-- goto out;
--#endif
--
-- /* Ensure the device has been registered */
-- err = -EINVAL;
-- if (dev->reg_state != NETREG_REGISTERED)
-- goto out;
--
-- /* Get out if there is nothing to do */
-- err = 0;
-- if (net_eq(dev_net(dev), net))
-- goto out;
--
-- /* Pick the destination device name, and ensure
-- * we can use it in the destination network namespace.
-- */
-- err = -EEXIST;
-- destname = dev->name;
-- if (__dev_get_by_name(net, destname)) {
-- /* We get here if we can't use the current device name */
-- if (!pat)
-- goto out;
-- if (!dev_valid_name(pat))
-- goto out;
-- if (strchr(pat, '%')) {
-- if (__dev_alloc_name(net, pat, buf) < 0)
-- goto out;
-- destname = buf;
-- } else
-- destname = pat;
-- if (__dev_get_by_name(net, destname))
-- goto out;
-- }
--
-- /*
-- * And now a mini version of register_netdevice and unregister_netdevice.
-- */
--
-- /* If device is running close it first. */
-- dev_close(dev);
--
-- /* And unlink it from device chain */
-- err = -ENODEV;
-- unlist_netdevice(dev);
--
-- synchronize_net();
--
-- /* Shutdown queueing discipline. */
-- dev_shutdown(dev);
--
-- /* Notify protocols that we are about to destroy
-- this device. They should clean up all their state.
-- */
-- call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
--
-- /*
-- * Flush the unicast and multicast chains
-- */
-- dev_addr_discard(dev);
--
-- netdev_unregister_kobject(dev);
--
-- /* Actually switch the network namespace */
-- dev_net_set(dev, net);
--
-- /* Assign the new device name */
-- if (destname != dev->name)
-- strcpy(dev->name, destname);
--
-- /* If there is an ifindex conflict assign a new one */
-- if (__dev_get_by_index(net, dev->ifindex)) {
-- int iflink = (dev->iflink == dev->ifindex);
-- dev->ifindex = dev_new_index(net);
-- if (iflink)
-- dev->iflink = dev->ifindex;
-- }
--
-- /* Fixup kobjects */
-- err = netdev_register_kobject(dev);
-- WARN_ON(err);
--
-- /* Add the device back in the hashes */
-- list_netdevice(dev);
--
-- /* Notify protocols that a new device appeared. */
-- call_netdevice_notifiers(NETDEV_REGISTER, dev);
--
-- synchronize_net();
-- err = 0;
--out:
-- return err;
--}
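
dev_change_net_namespace() is absent from the export list below, so only built-in code reaches it. A sketch of a caller, holding the RTNL as the comment requires and passing a "dev%d" fallback pattern for name collisions (the helper name is hypothetical):

#include <linux/rtnetlink.h>
#include <linux/netdevice.h>

static int move_dev_to_ns(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	/* "dev%d" is tried only if dev->name already exists in net. */
	err = dev_change_net_namespace(dev, net, "dev%d");
	rtnl_unlock();
	return err;
}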
--
--static int dev_cpu_callback(struct notifier_block *nfb,
-- unsigned long action,
-- void *ocpu)
--{
-- struct sk_buff **list_skb;
-- struct Qdisc **list_net;
-- struct sk_buff *skb;
-- unsigned int cpu, oldcpu = (unsigned long)ocpu;
-- struct softnet_data *sd, *oldsd;
--
-- if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
-- return NOTIFY_OK;
--
-- local_irq_disable();
-- cpu = smp_processor_id();
-- sd = &per_cpu(softnet_data, cpu);
-- oldsd = &per_cpu(softnet_data, oldcpu);
--
-- /* Find end of our completion_queue. */
-- list_skb = &sd->completion_queue;
-- while (*list_skb)
-- list_skb = &(*list_skb)->next;
-- /* Append completion queue from offline CPU. */
-- *list_skb = oldsd->completion_queue;
-- oldsd->completion_queue = NULL;
--
-- /* Find end of our output_queue. */
-- list_net = &sd->output_queue;
-- while (*list_net)
-- list_net = &(*list_net)->next_sched;
-- /* Append output queue from offline CPU. */
-- *list_net = oldsd->output_queue;
-- oldsd->output_queue = NULL;
--
-- raise_softirq_irqoff(NET_TX_SOFTIRQ);
-- local_irq_enable();
--
-- /* Process offline CPU's input_pkt_queue */
-- while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
-- netif_rx(skb);
--
-- return NOTIFY_OK;
--}
--
--#ifdef CONFIG_NET_DMA
--/**
-- * net_dma_rebalance - try to maintain one DMA channel per CPU
-- * @net_dma: DMA client and associated data (lock, channels, channel_mask)
-- *
-- * This is called when the number of channels allocated to the net_dma client
-- * changes. The net_dma client tries to have one DMA channel per CPU.
-- */
--
--static void net_dma_rebalance(struct net_dma *net_dma)
--{
-- unsigned int cpu, i, n, chan_idx;
-- struct dma_chan *chan;
--
-- if (cpus_empty(net_dma->channel_mask)) {
-- for_each_online_cpu(cpu)
-- rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
-- return;
-- }
--
-- i = 0;
-- cpu = first_cpu(cpu_online_map);
--
-- for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
-- chan = net_dma->channels[chan_idx];
--
-- n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
-- + (i < (num_online_cpus() %
-- cpus_weight(net_dma->channel_mask)) ? 1 : 0));
--
-- while(n) {
-- per_cpu(softnet_data, cpu).net_dma = chan;
-- cpu = next_cpu(cpu, cpu_online_map);
-- n--;
-- }
-- i++;
-- }
--}
--
--/**
-- * netdev_dma_event - event callback for the net_dma_client
-- * @client: should always be net_dma_client
-- * @chan: DMA channel for the event
-- * @state: DMA state to be handled
-- */
--static enum dma_state_client
--netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
-- enum dma_state state)
--{
-- int i, found = 0, pos = -1;
-- struct net_dma *net_dma =
-- container_of(client, struct net_dma, client);
-- enum dma_state_client ack = DMA_DUP; /* default: take no action */
--
-- spin_lock(&net_dma->lock);
-- switch (state) {
-- case DMA_RESOURCE_AVAILABLE:
-- for (i = 0; i < nr_cpu_ids; i++)
-- if (net_dma->channels[i] == chan) {
-- found = 1;
-- break;
-- } else if (net_dma->channels[i] == NULL && pos < 0)
-- pos = i;
--
-- if (!found && pos >= 0) {
-- ack = DMA_ACK;
-- net_dma->channels[pos] = chan;
-- cpu_set(pos, net_dma->channel_mask);
-- net_dma_rebalance(net_dma);
-- }
-- break;
-- case DMA_RESOURCE_REMOVED:
-- for (i = 0; i < nr_cpu_ids; i++)
-- if (net_dma->channels[i] == chan) {
-- found = 1;
-- pos = i;
-- break;
-- }
--
-- if (found) {
-- ack = DMA_ACK;
-- cpu_clear(pos, net_dma->channel_mask);
-- net_dma->channels[i] = NULL;
-- net_dma_rebalance(net_dma);
-- }
-- break;
-- default:
-- break;
-- }
-- spin_unlock(&net_dma->lock);
--
-- return ack;
--}
--
--/**
-- * netdev_dma_register - register the networking subsystem as a DMA client
-- */
--static int __init netdev_dma_register(void)
--{
-- net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
-- GFP_KERNEL);
-- if (unlikely(!net_dma.channels)) {
-- printk(KERN_NOTICE
-- "netdev_dma: no memory for net_dma.channels\n");
-- return -ENOMEM;
-- }
-- spin_lock_init(&net_dma.lock);
-- dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
-- dma_async_client_register(&net_dma.client);
-- dma_async_client_chan_request(&net_dma.client);
-- return 0;
--}
--
--#else
--static int __init netdev_dma_register(void) { return -ENODEV; }
--#endif /* CONFIG_NET_DMA */
--
--/**
-- * netdev_compute_features - compute conjunction of two feature sets
-- * @all: first feature set
-- * @one: second feature set
-- *
-- * Computes a new feature set after adding a device with feature set
-- * @one to the master device with current feature set @all. Returns
-- * the new feature set.
-- */
--int netdev_compute_features(unsigned long all, unsigned long one)
--{
-- /* if device needs checksumming, downgrade to hw checksumming */
-- if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
-- all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
--
-- /* if device can't do all checksum, downgrade to ipv4/ipv6 */
-- if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
-- all ^= NETIF_F_HW_CSUM
-- | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
--
-- if (one & NETIF_F_GSO)
-- one |= NETIF_F_GSO_SOFTWARE;
-- one |= NETIF_F_GSO;
--
-- /* If even one device supports robust GSO, enable it for all. */
-- if (one & NETIF_F_GSO_ROBUST)
-- all |= NETIF_F_GSO_ROBUST;
--
-- all &= one | NETIF_F_LLTX;
--
-- if (!(all & NETIF_F_ALL_CSUM))
-- all &= ~NETIF_F_SG;
-- if (!(all & NETIF_F_SG))
-- all &= ~NETIF_F_GSO_MASK;
--
-- return all;
--}
--EXPORT_SYMBOL(netdev_compute_features);
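
A sketch of the intended caller pattern: a master device (bonding, bridge) starts from a permissive feature set and folds each slave in through netdev_compute_features(); the slave array and count here are hypothetical:

static unsigned long fold_slave_features(struct net_device *slaves[],
					 int nslaves)
{
	unsigned long all = NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_GSO;
	int i;

	/* Each pass can only downgrade: e.g. NETIF_F_SG is cleared as
	 * soon as one slave cannot checksum, per the rules above. */
	for (i = 0; i < nslaves; i++)
		all = netdev_compute_features(all, slaves[i]->features);

	return all;
}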
--
--static struct hlist_head *netdev_create_hash(void)
--{
-- int i;
-- struct hlist_head *hash;
--
-- hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
-- if (hash != NULL)
-- for (i = 0; i < NETDEV_HASHENTRIES; i++)
-- INIT_HLIST_HEAD(&hash[i]);
--
-- return hash;
--}
--
--/* Initialize per network namespace state */
--static int __net_init netdev_init(struct net *net)
--{
-- INIT_LIST_HEAD(&net->dev_base_head);
--
-- net->dev_name_head = netdev_create_hash();
-- if (net->dev_name_head == NULL)
-- goto err_name;
--
-- net->dev_index_head = netdev_create_hash();
-- if (net->dev_index_head == NULL)
-- goto err_idx;
--
-- return 0;
--
--err_idx:
-- kfree(net->dev_name_head);
--err_name:
-- return -ENOMEM;
--}
--
--char *netdev_drivername(struct net_device *dev, char *buffer, int len)
--{
-- struct device_driver *driver;
-- struct device *parent;
--
-- if (len <= 0 || !buffer)
-- return buffer;
-- buffer[0] = 0;
--
-- parent = dev->dev.parent;
--
-- if (!parent)
-- return buffer;
--
-- driver = parent->driver;
-- if (driver && driver->name)
-- strlcpy(buffer, driver->name, len);
-- return buffer;
--}
--
--static void __net_exit netdev_exit(struct net *net)
--{
-- kfree(net->dev_name_head);
-- kfree(net->dev_index_head);
--}
--
--static struct pernet_operations __net_initdata netdev_net_ops = {
-- .init = netdev_init,
-- .exit = netdev_exit,
--};
--
--static void __net_exit default_device_exit(struct net *net)
--{
-- struct net_device *dev, *next;
-- /*
-- * Push all migratable network devices back to the
-- * initial network namespace
-- */
-- rtnl_lock();
-- for_each_netdev_safe(net, dev, next) {
-- int err;
-- char fb_name[IFNAMSIZ];
--
-- /* Ignore unmovable devices (e.g. loopback) */
-- if (dev->features & NETIF_F_NETNS_LOCAL)
-- continue;
--
-- /* Push remaining network devices to init_net */
-- snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
-- err = dev_change_net_namespace(dev, &init_net, fb_name);
-- if (err) {
-- printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
-- __func__, dev->name, err);
-- BUG();
-- }
-- }
-- rtnl_unlock();
--}
--
--static struct pernet_operations __net_initdata default_device_ops = {
-- .exit = default_device_exit,
--};
--
--/*
-- * Initialize the DEV module. At boot time this walks the device list and
-- * unhooks any devices that fail to initialise (normally hardware not
-- * present) and leaves us with a valid list of present and active devices.
-- *
-- */
--
--/*
-- * This is called single threaded during boot, so no need
-- * to take the rtnl semaphore.
-- */
--static int __init net_dev_init(void)
--{
-- int i, rc = -ENOMEM;
--
-- BUG_ON(!dev_boot_phase);
--
-- if (dev_proc_init())
-- goto out;
--
-- if (netdev_kobject_init())
-- goto out;
--
-- INIT_LIST_HEAD(&ptype_all);
-- for (i = 0; i < PTYPE_HASH_SIZE; i++)
-- INIT_LIST_HEAD(&ptype_base[i]);
--
-- if (register_pernet_subsys(&netdev_net_ops))
-- goto out;
--
-- if (register_pernet_device(&default_device_ops))
-- goto out;
--
-- /*
-- * Initialise the packet receive queues.
-- */
--
-- for_each_possible_cpu(i) {
-- struct softnet_data *queue;
--
-- queue = &per_cpu(softnet_data, i);
-- skb_queue_head_init(&queue->input_pkt_queue);
-- queue->completion_queue = NULL;
-- INIT_LIST_HEAD(&queue->poll_list);
--
-- queue->backlog.poll = process_backlog;
-- queue->backlog.weight = weight_p;
-- }
--
-- netdev_dma_register();
--
-- dev_boot_phase = 0;
--
-- open_softirq(NET_TX_SOFTIRQ, net_tx_action);
-- open_softirq(NET_RX_SOFTIRQ, net_rx_action);
--
-- hotcpu_notifier(dev_cpu_callback, 0);
-- dst_init();
-- dev_mcast_init();
-- rc = 0;
--out:
-- return rc;
--}
--
--subsys_initcall(net_dev_init);
--
--EXPORT_SYMBOL(__dev_get_by_index);
--EXPORT_SYMBOL(__dev_get_by_name);
--EXPORT_SYMBOL(__dev_remove_pack);
--EXPORT_SYMBOL(dev_valid_name);
--EXPORT_SYMBOL(dev_add_pack);
--EXPORT_SYMBOL(dev_alloc_name);
--EXPORT_SYMBOL(dev_close);
--EXPORT_SYMBOL(dev_get_by_flags);
--EXPORT_SYMBOL(dev_get_by_index);
--EXPORT_SYMBOL(dev_get_by_name);
--EXPORT_SYMBOL(dev_open);
--EXPORT_SYMBOL(dev_queue_xmit);
--EXPORT_SYMBOL(dev_remove_pack);
--EXPORT_SYMBOL(dev_set_allmulti);
--EXPORT_SYMBOL(dev_set_promiscuity);
--EXPORT_SYMBOL(dev_change_flags);
--EXPORT_SYMBOL(dev_set_mtu);
--EXPORT_SYMBOL(dev_set_mac_address);
--EXPORT_SYMBOL(free_netdev);
--EXPORT_SYMBOL(netdev_boot_setup_check);
--EXPORT_SYMBOL(netdev_set_master);
--EXPORT_SYMBOL(netdev_state_change);
--EXPORT_SYMBOL(netif_receive_skb);
--EXPORT_SYMBOL(netif_rx);
--EXPORT_SYMBOL(register_gifconf);
--EXPORT_SYMBOL(register_netdevice);
--EXPORT_SYMBOL(register_netdevice_notifier);
--EXPORT_SYMBOL(skb_checksum_help);
--EXPORT_SYMBOL(synchronize_net);
--EXPORT_SYMBOL(unregister_netdevice);
--EXPORT_SYMBOL(unregister_netdevice_notifier);
--EXPORT_SYMBOL(net_enable_timestamp);
--EXPORT_SYMBOL(net_disable_timestamp);
--EXPORT_SYMBOL(dev_get_flags);
--EXPORT_PER_CPU_SYMBOL(sknid_elevator);
--
--#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
--EXPORT_SYMBOL(br_handle_frame_hook);
--EXPORT_SYMBOL(br_fdb_get_hook);
--EXPORT_SYMBOL(br_fdb_put_hook);
--#endif
--
--#ifdef CONFIG_KMOD
--EXPORT_SYMBOL(dev_load);
--#endif
--
--EXPORT_PER_CPU_SYMBOL(softnet_data);
-diff -Nurb linux-2.6.27-720/net/core/dev.c.rej linux-2.6.27-710/net/core/dev.c.rej
---- linux-2.6.27-720/net/core/dev.c.rej 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/net/core/dev.c.rej 1969-12-31 19:00:00.000000000 -0500
-@@ -1,30 +0,0 @@
--***************
--*** 2187,2199 ****
-- * NET_RX_SUCCESS: no congestion
-- * NET_RX_DROP: packet was dropped
-- */
--- int netif_receive_skb(struct sk_buff *skb)
-- {
-- struct packet_type *ptype, *pt_prev;
-- struct net_device *orig_dev;
-- struct net_device *null_or_orig;
-- int ret = NET_RX_DROP;
--- __be16 type;
--
-- /* if we've gotten here through NAPI, check netpoll */
-- if (netpoll_receive_skb(skb))
----- 2215,2228 ----
-- * NET_RX_SUCCESS: no congestion
-- * NET_RX_DROP: packet was dropped
-- */
--+ //int netif_receive_skb(struct sk_buff *skb)
--+ int __netif_receive_skb(struct sk_buff *skb, unsigned short type, int notifier_data)
-- {
-- struct packet_type *ptype, *pt_prev;
-- struct net_device *orig_dev;
-- struct net_device *null_or_orig;
-- int ret = NET_RX_DROP;
--+ // __be16 type;
--
-- /* if we've gotten here through NAPI, check netpoll */
-- if (netpoll_receive_skb(skb))
-diff -Nurb linux-2.6.27-720/net/core/neighbour.c linux-2.6.27-710/net/core/neighbour.c
---- linux-2.6.27-720/net/core/neighbour.c 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/net/core/neighbour.c 2008-10-09 18:13:53.000000000 -0400
-@@ -2702,7 +2702,7 @@
-
- int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
- int p_id, int pdev_id, char *p_name,
-- proc_handler_t *handler, ctl_handler *strategy)
-+ proc_handler *handler, ctl_handler *strategy)
- {
- struct neigh_sysctl_table *t;
- const char *dev_name_source = NULL;
-diff -Nurb linux-2.6.27-720/net/core/skbuff.c linux-2.6.27-710/net/core/skbuff.c
---- linux-2.6.27-720/net/core/skbuff.c 2009-05-04 12:18:34.000000000 -0400
-+++ linux-2.6.27-710/net/core/skbuff.c 2009-05-04 12:15:31.000000000 -0400
-@@ -575,112 +575,6 @@
- skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
- }
-
--
--/* Click: clear skb header state */
--static inline void skb_headerinit(void *p, struct kmem_cache *cache,
-- unsigned long flags)
--{
-- struct sk_buff *skb = p;
--
-- skb->next = NULL;
-- skb->prev = NULL;
-- skb->sk = NULL;
-- skb->tstamp.tv64 = 0; /* No idea about time */
-- skb->dev = NULL;
-- skb->iif = 0;
-- skb->dst = NULL;
-- skb->sp = NULL;
-- memset(skb->cb, 0, sizeof(skb->cb));
-- skb->priority = 0;
-- skb->pkt_type = PACKET_HOST; /* Default type */
-- skb->ip_summed = 0;
-- skb->destructor = NULL;
--
--#ifdef CONFIG_NETFILTER
-- skb->mark = 0;
-- skb->nfct = NULL;
--# if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-- skb->nfct_reasm = NULL;
--# endif
--# ifdef CONFIG_BRIDGE_NETFILTER
-- skb->nf_bridge = NULL;
--# endif
--#endif
--#ifdef CONFIG_NET_SCHED
-- skb->tc_index = 0;
--# ifdef CONFIG_NET_CLS_ACT
-- skb->tc_verd = 0;
--# endif
--#endif
--}
--
--/* Click: attempts to recycle a sk_buff. if it can be recycled, return it */
--struct sk_buff *skb_recycle(struct sk_buff *skb)
--{
-- if (atomic_dec_and_test(&skb->users)) {
-- dst_release(skb->dst);
--#ifdef CONFIG_XFRM
-- secpath_put(skb->sp);
--#endif
-- if(skb->destructor) {
-- WARN_ON(in_irq());
-- skb->destructor(skb);
-- }
--#ifdef CONFIG_NETFILTER
-- nf_conntrack_put(skb->nfct);
--# if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-- nf_conntrack_put_reasm(skb->nfct_reasm);
--# endif
--# ifdef CONFIG_BRIDGE_NETFILTER
-- nf_bridge_put(skb->nf_bridge);
--# endif
--#endif
-- skb_headerinit(skb, NULL, 0);
--
-- if (skb->fclone == SKB_FCLONE_UNAVAILABLE
-- && (!skb->cloned ||
-- atomic_read(&skb_shinfo(skb)->dataref) == (skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1))) {
-- /* Don't need to atomic_sub skb_shinfo(skb)->dataref,
-- as we set that to 1 below. */
--
-- if (skb_shinfo(skb)->nr_frags) {
-- int i;
-- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-- put_page(skb_shinfo(skb)->frags[i].page);
-- /* Jason Park patch */
-- skb_shinfo(skb)->nr_frags = 0;
-- }
--
-- if (skb_shinfo(skb)->frag_list)
-- skb_drop_fraglist(skb);
--
-- /* Load the data pointers. */
-- skb->data = skb->head;
-- skb->tail = skb->data;
-- /* end and truesize should have never changed */
-- /* skb->end = skb->data + skb->truesize; */
--
-- /* set up other state */
-- skb->len = 0;
-- skb->cloned = 0;
--
-- atomic_set(&skb->users, 1);
-- atomic_set(&(skb_shinfo(skb)->dataref), 1);
-- /* Jason Park patch */
-- skb_shinfo(skb)->gso_size = 0;
-- skb_shinfo(skb)->gso_segs = 0;
-- skb_shinfo(skb)->gso_type = 0;
-- skb_shinfo(skb)->ip6_frag_id = 0;
--
-- return skb;
-- }
--
-- kfree_skbmem(skb);
-- }
--
-- return 0;
--}
--
- /**
- * skb_copy - create private copy of an sk_buff
- * @skb: buffer to copy
-@@ -2710,7 +2604,6 @@
- EXPORT_SYMBOL(skb_append_datato_frags);
- EXPORT_SYMBOL(__skb_warn_lro_forwarding);
-
--EXPORT_SYMBOL(skb_recycle);
- EXPORT_SYMBOL_GPL(skb_to_sgvec);
- EXPORT_SYMBOL_GPL(skb_cow_data);
- EXPORT_SYMBOL_GPL(skb_partial_csum_set);
-diff -Nurb linux-2.6.27-720/net/core/skbuff.c.orig linux-2.6.27-710/net/core/skbuff.c.orig
---- linux-2.6.27-720/net/core/skbuff.c.orig 2009-05-04 12:15:31.000000000 -0400
-+++ linux-2.6.27-710/net/core/skbuff.c.orig 1969-12-31 19:00:00.000000000 -0500
-@@ -1,2609 +0,0 @@
--/*
-- * Routines having to do with the 'struct sk_buff' memory handlers.
-- *
-- * Authors: Alan Cox <iiitac@pyr.swan.ac.uk>
-- * Florian La Roche <rzsfl@rz.uni-sb.de>
-- *
-- * Fixes:
-- * Alan Cox : Fixed the worst of the load
-- * balancer bugs.
-- * Dave Platt : Interrupt stacking fix.
-- * Richard Kooijman : Timestamp fixes.
-- * Alan Cox : Changed buffer format.
-- * Alan Cox : destructor hook for AF_UNIX etc.
-- * Linus Torvalds : Better skb_clone.
-- * Alan Cox : Added skb_copy.
-- * Alan Cox : Added all the changed routines Linus
-- * only put in the headers
-- * Ray VanTassle : Fixed --skb->lock in free
-- * Alan Cox : skb_copy copy arp field
-- * Andi Kleen : slabified it.
-- * Robert Olsson : Removed skb_head_pool
-- *
-- * NOTE:
-- * The __skb_ routines should be called with interrupts
-- * disabled, or you better be *real* sure that the operation is atomic
-- * with respect to whatever list is being frobbed (e.g. via lock_sock()
-- * or via disabling bottom half handlers, etc).
-- *
-- * This program is free software; you can redistribute it and/or
-- * modify it under the terms of the GNU General Public License
-- * as published by the Free Software Foundation; either version
-- * 2 of the License, or (at your option) any later version.
-- */
--
--/*
-- * The functions in this file will not compile correctly with gcc 2.4.x
-- */
--
--#include <linux/module.h>
--#include <linux/types.h>
--#include <linux/kernel.h>
--#include <linux/mm.h>
--#include <linux/interrupt.h>
--#include <linux/in.h>
--#include <linux/inet.h>
--#include <linux/slab.h>
--#include <linux/netdevice.h>
--#ifdef CONFIG_NET_CLS_ACT
--#include <net/pkt_sched.h>
--#endif
--#include <linux/string.h>
--#include <linux/skbuff.h>
--#include <linux/splice.h>
--#include <linux/cache.h>
--#include <linux/rtnetlink.h>
--#include <linux/init.h>
--#include <linux/scatterlist.h>
--#include <linux/vs_network.h>
--
--#include <net/protocol.h>
--#include <net/dst.h>
--#include <net/sock.h>
--#include <net/checksum.h>
--#include <net/xfrm.h>
--
--#include <asm/uaccess.h>
--#include <asm/system.h>
--
--#include "kmap_skb.h"
--
--static struct kmem_cache *skbuff_head_cache __read_mostly;
--static struct kmem_cache *skbuff_fclone_cache __read_mostly;
--
--static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
-- struct pipe_buffer *buf)
--{
-- struct sk_buff *skb = (struct sk_buff *) buf->private;
--
-- kfree_skb(skb);
--}
--
--static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
-- struct pipe_buffer *buf)
--{
-- struct sk_buff *skb = (struct sk_buff *) buf->private;
--
-- skb_get(skb);
--}
--
--static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
-- struct pipe_buffer *buf)
--{
-- return 1;
--}
--
--
--/* Pipe buffer operations for a socket. */
--static struct pipe_buf_operations sock_pipe_buf_ops = {
-- .can_merge = 0,
-- .map = generic_pipe_buf_map,
-- .unmap = generic_pipe_buf_unmap,
-- .confirm = generic_pipe_buf_confirm,
-- .release = sock_pipe_buf_release,
-- .steal = sock_pipe_buf_steal,
-- .get = sock_pipe_buf_get,
--};
--
--/*
-- * Keep out-of-line to prevent kernel bloat.
-- * __builtin_return_address is not used because it is not always
-- * reliable.
-- */
--
--/**
-- * skb_over_panic - private function
-- * @skb: buffer
-- * @sz: size
-- * @here: address
-- *
-- * Out of line support code for skb_put(). Not user callable.
-- */
--void skb_over_panic(struct sk_buff *skb, int sz, void *here)
--{
-- printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
-- "data:%p tail:%#lx end:%#lx dev:%s\n",
-- here, skb->len, sz, skb->head, skb->data,
-- (unsigned long)skb->tail, (unsigned long)skb->end,
-- skb->dev ? skb->dev->name : "<NULL>");
-- BUG();
--}
--
--/**
-- * skb_under_panic - private function
-- * @skb: buffer
-- * @sz: size
-- * @here: address
-- *
-- * Out of line support code for skb_push(). Not user callable.
-- */
--
--void skb_under_panic(struct sk_buff *skb, int sz, void *here)
--{
-- printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
-- "data:%p tail:%#lx end:%#lx dev:%s\n",
-- here, skb->len, sz, skb->head, skb->data,
-- (unsigned long)skb->tail, (unsigned long)skb->end,
-- skb->dev ? skb->dev->name : "<NULL>");
-- BUG();
--}
--
--void skb_truesize_bug(struct sk_buff *skb)
--{
-- printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
-- "len=%u, sizeof(sk_buff)=%Zd\n",
-- skb->truesize, skb->len, sizeof(struct sk_buff));
--}
--EXPORT_SYMBOL(skb_truesize_bug);
--
--/* Allocate a new skbuff. We do this ourselves so we can fill in a few
-- * 'private' fields and also do memory statistics to find all the
-- * [BEEP] leaks.
-- *
-- */
--
--/**
-- * __alloc_skb - allocate a network buffer
-- * @size: size to allocate
-- * @gfp_mask: allocation mask
-- * @fclone: allocate from fclone cache instead of head cache
-- * and allocate a cloned (child) skb
-- * @node: numa node to allocate memory on
-- *
-- * Allocate a new &sk_buff. The returned buffer has no headroom and a
-- * tail room of size bytes. The object has a reference count of one.
-- * The return is the buffer. On a failure the return is %NULL.
-- *
-- * Buffers may only be allocated from interrupts using a @gfp_mask of
-- * %GFP_ATOMIC.
-- */
--struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
-- int fclone, int node)
--{
-- struct kmem_cache *cache;
-- struct skb_shared_info *shinfo;
-- struct sk_buff *skb;
-- u8 *data;
--
-- cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
--
-- /* Get the HEAD */
-- skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
-- if (!skb)
-- goto out;
--
-- size = SKB_DATA_ALIGN(size);
-- data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
-- gfp_mask, node);
-- if (!data)
-- goto nodata;
--
-- /*
-- * Only clear those fields we need to clear, not those that we will
-- * actually initialise below. Hence, don't put any more fields after
-- * the tail pointer in struct sk_buff!
-- */
-- memset(skb, 0, offsetof(struct sk_buff, tail));
-- skb->truesize = size + sizeof(struct sk_buff);
-- atomic_set(&skb->users, 1);
-- skb->head = data;
-- skb->data = data;
-- skb_reset_tail_pointer(skb);
-- skb->end = skb->tail + size;
-- if (!in_interrupt()) skb->skb_tag = nx_current_nid(); else skb->skb_tag = 0;
-- /* make sure we initialize shinfo sequentially */
-- shinfo = skb_shinfo(skb);
-- atomic_set(&shinfo->dataref, 1);
-- shinfo->nr_frags = 0;
-- shinfo->gso_size = 0;
-- shinfo->gso_segs = 0;
-- shinfo->gso_type = 0;
-- shinfo->ip6_frag_id = 0;
-- shinfo->frag_list = NULL;
--
-- if (fclone) {
-- struct sk_buff *child = skb + 1;
-- atomic_t *fclone_ref = (atomic_t *) (child + 1);
--
-- skb->fclone = SKB_FCLONE_ORIG;
-- atomic_set(fclone_ref, 1);
--
-- child->fclone = SKB_FCLONE_UNAVAILABLE;
-- }
--out:
-- return skb;
--nodata:
-- kmem_cache_free(cache, skb);
-- skb = NULL;
-- goto out;
--}
--
--/**
-- * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
-- * @dev: network device to receive on
-- * @length: length to allocate
-- * @gfp_mask: get_free_pages mask, passed to alloc_skb
-- *
-- * Allocate a new &sk_buff and assign it a usage count of one. The
-- * buffer has unspecified headroom built in. Users should allocate
-- * the headroom they think they need without accounting for the
-- * built in space. The built in space is used for optimisations.
-- *
-- * %NULL is returned if there is no free memory.
-- */
--struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-- unsigned int length, gfp_t gfp_mask)
--{
-- int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
-- struct sk_buff *skb;
--
-- skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
-- if (likely(skb)) {
-- skb_reserve(skb, NET_SKB_PAD);
-- skb->dev = dev;
-- }
-- return skb;
--}
--
--/**
-- * dev_alloc_skb - allocate an skbuff for receiving
-- * @length: length to allocate
-- *
-- * Allocate a new &sk_buff and assign it a usage count of one. The
-- * buffer has unspecified headroom built in. Users should allocate
-- * the headroom they think they need without accounting for the
-- * built in space. The built in space is used for optimisations.
-- *
-- * %NULL is returned if there is no free memory. Although this function
-- * allocates memory it can be called from an interrupt.
-- */
--struct sk_buff *dev_alloc_skb(unsigned int length)
--{
-- /*
-- * There is more code here than it seems:
-- * __dev_alloc_skb is an inline
-- */
-- return __dev_alloc_skb(length, GFP_ATOMIC);
--}
--EXPORT_SYMBOL(dev_alloc_skb);
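
A sketch of the typical receive-path caller of these allocators, assuming a hypothetical driver that copies pkt_len bytes out of a hardware buffer:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static void mydrv_rx(struct net_device *dev, const void *hw_buf,
		     unsigned int pkt_len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
	if (!skb)
		return;				/* drop under memory pressure */

	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	memcpy(skb_put(skb, pkt_len), hw_buf, pkt_len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}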
--
--static void skb_drop_list(struct sk_buff **listp)
--{
-- struct sk_buff *list = *listp;
--
-- *listp = NULL;
--
-- do {
-- struct sk_buff *this = list;
-- list = list->next;
-- kfree_skb(this);
-- } while (list);
--}
--
--static inline void skb_drop_fraglist(struct sk_buff *skb)
--{
-- skb_drop_list(&skb_shinfo(skb)->frag_list);
--}
--
--static void skb_clone_fraglist(struct sk_buff *skb)
--{
-- struct sk_buff *list;
--
-- for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
-- skb_get(list);
--}
--
--static void skb_release_data(struct sk_buff *skb)
--{
-- if (!skb->cloned ||
-- !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
-- &skb_shinfo(skb)->dataref)) {
-- if (skb_shinfo(skb)->nr_frags) {
-- int i;
-- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-- put_page(skb_shinfo(skb)->frags[i].page);
-- }
--
-- if (skb_shinfo(skb)->frag_list)
-- skb_drop_fraglist(skb);
--
-- kfree(skb->head);
-- }
--}
--
--/*
-- * Free the skbuff memory without cleaning the state.
-- */
--static void kfree_skbmem(struct sk_buff *skb)
--{
-- struct sk_buff *other;
-- atomic_t *fclone_ref;
--
-- switch (skb->fclone) {
-- case SKB_FCLONE_UNAVAILABLE:
-- kmem_cache_free(skbuff_head_cache, skb);
-- break;
--
-- case SKB_FCLONE_ORIG:
-- fclone_ref = (atomic_t *) (skb + 2);
-- if (atomic_dec_and_test(fclone_ref))
-- kmem_cache_free(skbuff_fclone_cache, skb);
-- break;
--
-- case SKB_FCLONE_CLONE:
-- fclone_ref = (atomic_t *) (skb + 1);
-- other = skb - 1;
--
-- /* The clone portion is available for
-- * fast-cloning again.
-- */
-- skb->fclone = SKB_FCLONE_UNAVAILABLE;
--
-- if (atomic_dec_and_test(fclone_ref))
-- kmem_cache_free(skbuff_fclone_cache, other);
-- break;
-- }
--}
--
--/* Free everything but the sk_buff shell. */
--static void skb_release_all(struct sk_buff *skb)
--{
-- dst_release(skb->dst);
--#ifdef CONFIG_XFRM
-- secpath_put(skb->sp);
--#endif
-- if (skb->destructor) {
-- WARN_ON(in_irq());
-- skb->destructor(skb);
-- }
--#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-- nf_conntrack_put(skb->nfct);
-- nf_conntrack_put_reasm(skb->nfct_reasm);
--#endif
--#ifdef CONFIG_BRIDGE_NETFILTER
-- nf_bridge_put(skb->nf_bridge);
--#endif
--/* XXX: IS this still necessary? - JHS */
--#ifdef CONFIG_NET_SCHED
-- skb->tc_index = 0;
--#ifdef CONFIG_NET_CLS_ACT
-- skb->tc_verd = 0;
--#endif
--#endif
-- skb_release_data(skb);
--}
--
--/**
-- * __kfree_skb - private function
-- * @skb: buffer
-- *
-- * Free an sk_buff. Release anything attached to the buffer.
-- * Clean the state. This is an internal helper function. Users should
-- * always call kfree_skb().
-- */
--
--void __kfree_skb(struct sk_buff *skb)
--{
-- skb_release_all(skb);
-- kfree_skbmem(skb);
--}
--
--/**
-- * kfree_skb - free an sk_buff
-- * @skb: buffer to free
-- *
-- * Drop a reference to the buffer and free it if the usage count has
-- * hit zero.
-- */
--void kfree_skb(struct sk_buff *skb)
--{
-- if (unlikely(!skb))
-- return;
-- if (likely(atomic_read(&skb->users) == 1))
-- smp_rmb();
-- else if (likely(!atomic_dec_and_test(&skb->users)))
-- return;
-- __kfree_skb(skb);
--}
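
The skb->users handling above is a plain reference count. A small sketch of the take/put pairing, assuming the caller starts with the single reference from allocation:

#include <linux/skbuff.h>

static void refcount_demo(struct sk_buff *skb)	/* skb->users == 1 */
{
	struct sk_buff *keep = skb_get(skb);	/* users: 1 -> 2 */

	kfree_skb(skb);		/* users: 2 -> 1, buffer survives */
	kfree_skb(keep);	/* users: 1 -> 0, buffer is freed */
}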
--
--static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
--{
-- new->tstamp = old->tstamp;
-- new->dev = old->dev;
-- new->transport_header = old->transport_header;
-- new->network_header = old->network_header;
-- new->mac_header = old->mac_header;
-- new->dst = dst_clone(old->dst);
--#ifdef CONFIG_INET
-- new->sp = secpath_get(old->sp);
--#endif
-- memcpy(new->cb, old->cb, sizeof(old->cb));
-- new->csum_start = old->csum_start;
-- new->csum_offset = old->csum_offset;
-- new->local_df = old->local_df;
-- new->pkt_type = old->pkt_type;
-- new->ip_summed = old->ip_summed;
-- skb_copy_queue_mapping(new, old);
-- new->priority = old->priority;
--#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
-- new->ipvs_property = old->ipvs_property;
--#endif
-- new->protocol = old->protocol;
-- new->mark = old->mark;
-- __nf_copy(new, old);
--#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
-- defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
-- new->nf_trace = old->nf_trace;
--#endif
--#ifdef CONFIG_NET_SCHED
-- new->tc_index = old->tc_index;
--#ifdef CONFIG_NET_CLS_ACT
-- new->tc_verd = old->tc_verd;
--#endif
--#endif
-- new->vlan_tci = old->vlan_tci;
-- new->skb_tag = old->skb_tag;
--
-- skb_copy_secmark(new, old);
--}
--
--static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
--{
--#define C(x) n->x = skb->x
--
-- n->next = n->prev = NULL;
-- n->sk = NULL;
-- __copy_skb_header(n, skb);
--
-- C(len);
-- C(data_len);
-- C(mac_len);
-- n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
-- n->cloned = 1;
-- n->nohdr = 0;
-- n->destructor = NULL;
-- C(iif);
-- C(tail);
-- C(end);
-- C(head);
-- C(data);
-- C(truesize);
--#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
-- C(do_not_encrypt);
--#endif
-- atomic_set(&n->users, 1);
--
-- /* Sapan: Cloned skbs aren't owned by anyone. Let the cloner decide who it belongs to. */
--
-- atomic_inc(&(skb_shinfo(skb)->dataref));
-- skb->cloned = 1;
--
-- return n;
--#undef C
--}
--
--/**
-- * skb_morph - morph one skb into another
-- * @dst: the skb to receive the contents
-- * @src: the skb to supply the contents
-- *
-- * This is identical to skb_clone except that the target skb is
-- * supplied by the user.
-- *
-- * The target skb is returned upon exit.
-- */
--struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
--{
-- skb_release_all(dst);
-- return __skb_clone(dst, src);
--}
--EXPORT_SYMBOL_GPL(skb_morph);
--
--/**
-- * skb_clone - duplicate an sk_buff
-- * @skb: buffer to clone
-- * @gfp_mask: allocation priority
-- *
-- * Duplicate an &sk_buff. The new one is not owned by a socket. Both
-- * copies share the same packet data but not structure. The new
-- * buffer has a reference count of 1. If the allocation fails the
-- * function returns %NULL otherwise the new buffer is returned.
-- *
-- * If this function is called from an interrupt, @gfp_mask must be
-- * %GFP_ATOMIC.
-- */
--
--struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
--{
-- struct sk_buff *n;
--
-- n = skb + 1;
-- if (skb->fclone == SKB_FCLONE_ORIG &&
-- n->fclone == SKB_FCLONE_UNAVAILABLE) {
-- atomic_t *fclone_ref = (atomic_t *) (n + 1);
-- n->fclone = SKB_FCLONE_CLONE;
-- atomic_inc(fclone_ref);
-- } else {
-- n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
-- if (!n)
-- return NULL;
-- n->fclone = SKB_FCLONE_UNAVAILABLE;
-- }
--
-- return __skb_clone(n, skb);
--}
--
--static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
--{
--#ifndef NET_SKBUFF_DATA_USES_OFFSET
-- /*
-- * Shift between the two data areas in bytes
-- */
-- unsigned long offset = new->data - old->data;
--#endif
--
-- __copy_skb_header(new, old);
--
--#ifndef NET_SKBUFF_DATA_USES_OFFSET
-- /* {transport,network,mac}_header are relative to skb->head */
-- new->transport_header += offset;
-- new->network_header += offset;
-- new->mac_header += offset;
--#endif
-- skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
-- skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
-- skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
--}
--
--/**
-- * skb_copy - create private copy of an sk_buff
-- * @skb: buffer to copy
-- * @gfp_mask: allocation priority
-- *
-- * Make a copy of both an &sk_buff and its data. This is used when the
-- * caller wishes to modify the data and needs a private copy of the
-- * data to alter. Returns %NULL on failure or the pointer to the buffer
-- * on success. The returned buffer has a reference count of 1.
-- *
-- * As a by-product this function converts a non-linear &sk_buff to a
-- * linear one, so that the &sk_buff becomes completely private and the
-- * caller is allowed to modify all the data of the returned buffer. This
-- * means that this function is not recommended for use in circumstances
-- * when only the header is going to be modified. Use pskb_copy() instead.
-- */
--
--struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
--{
-- int headerlen = skb->data - skb->head;
-- /*
-- * Allocate the copy buffer
-- */
-- struct sk_buff *n;
--#ifdef NET_SKBUFF_DATA_USES_OFFSET
-- n = alloc_skb(skb->end + skb->data_len, gfp_mask);
--#else
-- n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
--#endif
-- if (!n)
-- return NULL;
--
-- /* Set the data pointer */
-- skb_reserve(n, headerlen);
-- /* Set the tail pointer and length */
-- skb_put(n, skb->len);
--
-- if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
-- BUG();
--
-- copy_skb_header(n, skb);
-- return n;
--}
--
--
--/**
-- * pskb_copy - create copy of an sk_buff with private head.
-- * @skb: buffer to copy
-- * @gfp_mask: allocation priority
-- *
-- * Make a copy of both an &sk_buff and part of its data, located
-- * in the header. Fragmented data remains shared. This is used when
-- * the caller wishes to modify only the header of the &sk_buff and needs
-- * a private copy of the header to alter. Returns %NULL on failure
-- * or the pointer to the buffer on success.
-- * The returned buffer has a reference count of 1.
-- */
--
--struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
--{
-- /*
-- * Allocate the copy buffer
-- */
-- struct sk_buff *n;
--#ifdef NET_SKBUFF_DATA_USES_OFFSET
-- n = alloc_skb(skb->end, gfp_mask);
--#else
-- n = alloc_skb(skb->end - skb->head, gfp_mask);
--#endif
-- if (!n)
-- goto out;
--
-- /* Set the data pointer */
-- skb_reserve(n, skb->data - skb->head);
-- /* Set the tail pointer and length */
-- skb_put(n, skb_headlen(skb));
-- /* Copy the bytes */
-- skb_copy_from_linear_data(skb, n->data, n->len);
--
-- n->truesize += skb->data_len;
-- n->data_len = skb->data_len;
-- n->len = skb->len;
--
-- if (skb_shinfo(skb)->nr_frags) {
-- int i;
--
-- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-- skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
-- get_page(skb_shinfo(n)->frags[i].page);
-- }
-- skb_shinfo(n)->nr_frags = i;
-- }
--
-- if (skb_shinfo(skb)->frag_list) {
-- skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
-- skb_clone_fraglist(n);
-- }
--
-- copy_skb_header(n, skb);
--out:
-- return n;
--}
--
--/**
-- * pskb_expand_head - reallocate header of &sk_buff
-- * @skb: buffer to reallocate
-- * @nhead: room to add at head
-- * @ntail: room to add at tail
-- * @gfp_mask: allocation priority
-- *
-- * Expands (or creates identical copy, if &nhead and &ntail are zero)
-- * header of skb. &sk_buff itself is not changed. &sk_buff MUST have
-- * reference count of 1. Returns zero on success or an error code if
-- * expansion failed. In the latter case, the &sk_buff is not changed.
-- *
-- * All the pointers pointing into skb header may change and must be
-- * reloaded after call to this function.
-- */
--
--int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
-- gfp_t gfp_mask)
--{
-- int i;
-- u8 *data;
--#ifdef NET_SKBUFF_DATA_USES_OFFSET
-- int size = nhead + skb->end + ntail;
--#else
-- int size = nhead + (skb->end - skb->head) + ntail;
--#endif
-- long off;
--
-- if (skb_shared(skb))
-- BUG();
--
-- size = SKB_DATA_ALIGN(size);
--
-- data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
-- if (!data)
-- goto nodata;
--
-- /* Copy only real data... and, alas, header. This should be
-- * optimized for the cases when header is void. */
--#ifdef NET_SKBUFF_DATA_USES_OFFSET
-- memcpy(data + nhead, skb->head, skb->tail);
--#else
-- memcpy(data + nhead, skb->head, skb->tail - skb->head);
--#endif
-- memcpy(data + size, skb_end_pointer(skb),
-- sizeof(struct skb_shared_info));
--
-- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-- get_page(skb_shinfo(skb)->frags[i].page);
--
-- if (skb_shinfo(skb)->frag_list)
-- skb_clone_fraglist(skb);
--
-- skb_release_data(skb);
--
-- off = (data + nhead) - skb->head;
--
-- skb->head = data;
-- skb->data += off;
--#ifdef NET_SKBUFF_DATA_USES_OFFSET
-- skb->end = size;
-- off = nhead;
--#else
-- skb->end = skb->head + size;
--#endif
-- /* {transport,network,mac}_header and tail are relative to skb->head */
-- skb->tail += off;
-- skb->transport_header += off;
-- skb->network_header += off;
-- skb->mac_header += off;
-- skb->csum_start += nhead;
-- skb->cloned = 0;
-- skb->hdr_len = 0;
-- skb->nohdr = 0;
-- atomic_set(&skb_shinfo(skb)->dataref, 1);
-- return 0;
--
--nodata:
-- return -ENOMEM;
--}
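
Because pskb_expand_head() replaces the head, every cached pointer into the old header is stale afterwards. A sketch of the reload discipline using a hypothetical headroom-growing helper:

#include <linux/skbuff.h>
#include <net/ip.h>

static int grow_headroom(struct sk_buff *skb, int extra)
{
	struct iphdr *iph;

	if (pskb_expand_head(skb, extra, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Reload from skb->data; any pointer taken before the call,
	 * e.g. an old ip_hdr() result, is now invalid. */
	iph = ip_hdr(skb);

	return iph->ihl >= 5 ? 0 : -EINVAL;	/* illustrative use only */
}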
--
--/* Make private copy of skb with writable head and some headroom */
--
--struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
--{
-- struct sk_buff *skb2;
-- int delta = headroom - skb_headroom(skb);
--
-- if (delta <= 0)
-- skb2 = pskb_copy(skb, GFP_ATOMIC);
-- else {
-- skb2 = skb_clone(skb, GFP_ATOMIC);
-- if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
-- GFP_ATOMIC)) {
-- kfree_skb(skb2);
-- skb2 = NULL;
-- }
-- }
-- return skb2;
--}
--
--
--/**
-- * skb_copy_expand - copy and expand sk_buff
-- * @skb: buffer to copy
-- * @newheadroom: new free bytes at head
-- * @newtailroom: new free bytes at tail
-- * @gfp_mask: allocation priority
-- *
-- * Make a copy of both an &sk_buff and its data and while doing so
-- * allocate additional space.
-- *
-- * This is used when the caller wishes to modify the data and needs a
-- * private copy of the data to alter as well as more space for new fields.
-- * Returns %NULL on failure or the pointer to the buffer
-- * on success. The returned buffer has a reference count of 1.
-- *
-- * You must pass %GFP_ATOMIC as the allocation priority if this function
-- * is called from an interrupt.
-- */
--struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-- int newheadroom, int newtailroom,
-- gfp_t gfp_mask)
--{
-- /*
-- * Allocate the copy buffer
-- */
-- struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
-- gfp_mask);
-- int oldheadroom = skb_headroom(skb);
-- int head_copy_len, head_copy_off;
-- int off;
--
-- if (!n)
-- return NULL;
--
-- skb_reserve(n, newheadroom);
--
-- /* Set the tail pointer and length */
-- skb_put(n, skb->len);
--
-- head_copy_len = oldheadroom;
-- head_copy_off = 0;
-- if (newheadroom <= head_copy_len)
-- head_copy_len = newheadroom;
-- else
-- head_copy_off = newheadroom - head_copy_len;
--
-- /* Copy the linear header and data. */
-- if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
-- skb->len + head_copy_len))
-- BUG();
--
-- copy_skb_header(n, skb);
--
-- off = newheadroom - oldheadroom;
-- n->csum_start += off;
--#ifdef NET_SKBUFF_DATA_USES_OFFSET
-- n->transport_header += off;
-- n->network_header += off;
-- n->mac_header += off;
--#endif
--
-- return n;
--}
--
--/**
-- * skb_pad - zero pad the tail of an skb
-- * @skb: buffer to pad
-- * @pad: space to pad
-- *
-- * Ensure that a buffer is followed by a padding area that is zero
-- * filled. Used by network drivers which may DMA or transfer data
-- * beyond the buffer end onto the wire.
-- *
-- * May return error in out of memory cases. The skb is freed on error.
-- */
--
--int skb_pad(struct sk_buff *skb, int pad)
--{
-- int err;
-- int ntail;
--
-- /* If the skbuff is non-linear, tailroom is always zero. */
-- if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
-- memset(skb->data+skb->len, 0, pad);
-- return 0;
-- }
--
-- ntail = skb->data_len + pad - (skb->end - skb->tail);
-- if (likely(skb_cloned(skb) || ntail > 0)) {
-- err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
-- if (unlikely(err))
-- goto free_skb;
-- }
--
-- /* FIXME: The use of this function with non-linear skb's really needs
-- * to be audited.
-- */
-- err = skb_linearize(skb);
-- if (unlikely(err))
-- goto free_skb;
--
-- memset(skb->data + skb->len, 0, pad);
-- return 0;
--
--free_skb:
-- kfree_skb(skb);
-- return err;
--}
--
--/**
-- * skb_put - add data to a buffer
-- * @skb: buffer to use
-- * @len: amount of data to add
-- *
-- * This function extends the used data area of the buffer. If this would
-- * exceed the total buffer size the kernel will panic. A pointer to the
-- * first byte of the extra data is returned.
-- */
--unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
--{
-- unsigned char *tmp = skb_tail_pointer(skb);
-- SKB_LINEAR_ASSERT(skb);
-- skb->tail += len;
-- skb->len += len;
-- if (unlikely(skb->tail > skb->end))
-- skb_over_panic(skb, len, __builtin_return_address(0));
-- return tmp;
--}
--EXPORT_SYMBOL(skb_put);
--
--/**
-- * skb_push - add data to the start of a buffer
-- * @skb: buffer to use
-- * @len: amount of data to add
-- *
-- * This function extends the used data area of the buffer at the buffer
-- * start. If this would exceed the total buffer headroom the kernel will
-- * panic. A pointer to the first byte of the extra data is returned.
-- */
--unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
--{
-- skb->data -= len;
-- skb->len += len;
-- if (unlikely(skb->data<skb->head))
-- skb_under_panic(skb, len, __builtin_return_address(0));
-- return skb->data;
--}
--EXPORT_SYMBOL(skb_push);
--
--/**
-- * skb_pull - remove data from the start of a buffer
-- * @skb: buffer to use
-- * @len: amount of data to remove
-- *
-- * This function removes data from the start of a buffer, returning
-- * the memory to the headroom. A pointer to the next data in the buffer
-- * is returned. Once the data has been pulled future pushes will overwrite
-- * the old data.
-- */
--unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
--{
-- return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
--}
--EXPORT_SYMBOL(skb_pull);
--
--/**
-- * skb_trim - remove end from a buffer
-- * @skb: buffer to alter
-- * @len: new length
-- *
-- * Cut the length of a buffer down by removing data from the tail. If
-- * the buffer is already under the length specified it is not modified.
-- * The skb must be linear.
-- */
--void skb_trim(struct sk_buff *skb, unsigned int len)
--{
-- if (skb->len > len)
-- __skb_trim(skb, len);
--}
--EXPORT_SYMBOL(skb_trim);
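
How these four primitives move skb->data, skb->tail and skb->len, shown on a freshly allocated linear buffer (sizes are arbitrary illustration):

#include <linux/skbuff.h>
#include <linux/string.h>

static void data_pointer_demo(struct sk_buff *skb)	/* empty, ample room */
{
	skb_reserve(skb, 16);		   /* headroom: data, tail += 16   */
	memset(skb_put(skb, 100), 0, 100); /* tail += 100, len == 100      */
	skb_push(skb, 14);		   /* data -= 14,  len == 114      */
	skb_pull(skb, 14);		   /* data += 14,  len == 100      */
	skb_trim(skb, 60);		   /* tail back,   len == 60       */
}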
--
--/* Trims skb to length len. It can change skb pointers.
-- */
--
--int ___pskb_trim(struct sk_buff *skb, unsigned int len)
--{
-- struct sk_buff **fragp;
-- struct sk_buff *frag;
-- int offset = skb_headlen(skb);
-- int nfrags = skb_shinfo(skb)->nr_frags;
-- int i;
-- int err;
--
-- if (skb_cloned(skb) &&
-- unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
-- return err;
--
-- i = 0;
-- if (offset >= len)
-- goto drop_pages;
--
-- for (; i < nfrags; i++) {
-- int end = offset + skb_shinfo(skb)->frags[i].size;
--
-- if (end < len) {
-- offset = end;
-- continue;
-- }
--
-- skb_shinfo(skb)->frags[i++].size = len - offset;
--
--drop_pages:
-- skb_shinfo(skb)->nr_frags = i;
--
-- for (; i < nfrags; i++)
-- put_page(skb_shinfo(skb)->frags[i].page);
--
-- if (skb_shinfo(skb)->frag_list)
-- skb_drop_fraglist(skb);
-- goto done;
-- }
--
-- for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
-- fragp = &frag->next) {
-- int end = offset + frag->len;
--
-- if (skb_shared(frag)) {
-- struct sk_buff *nfrag;
--
-- nfrag = skb_clone(frag, GFP_ATOMIC);
-- if (unlikely(!nfrag))
-- return -ENOMEM;
--
-- nfrag->next = frag->next;
-- kfree_skb(frag);
-- frag = nfrag;
-- *fragp = frag;
-- }
--
-- if (end < len) {
-- offset = end;
-- continue;
-- }
--
-- if (end > len &&
-- unlikely((err = pskb_trim(frag, len - offset))))
-- return err;
--
-- if (frag->next)
-- skb_drop_list(&frag->next);
-- break;
-- }
--
--done:
-- if (len > skb_headlen(skb)) {
-- skb->data_len -= skb->len - len;
-- skb->len = len;
-- } else {
-- skb->len = len;
-- skb->data_len = 0;
-- skb_set_tail_pointer(skb, len);
-- }
--
-- return 0;
--}
--
--/**
-- * __pskb_pull_tail - advance tail of skb header
-- * @skb: buffer to reallocate
-- * @delta: number of bytes to advance tail
-- *
-- * The function makes sense only on a fragmented &sk_buff;
-- * it expands the header, moving its tail forward and copying necessary
-- * data from the fragmented part.
-- *
-- * &sk_buff MUST have reference count of 1.
-- *
-- * Returns %NULL (and &sk_buff does not change) if pull failed
-- * or value of new tail of skb in the case of success.
-- *
-- * All the pointers pointing into skb header may change and must be
-- * reloaded after call to this function.
-- */
--
--/* Moves tail of skb head forward, copying data from fragmented part,
-- * when it is necessary.
-- * 1. It may fail due to malloc failure.
-- * 2. It may change skb pointers.
-- *
-- * It is pretty complicated. Luckily, it is called only in exceptional cases.
-- */
--unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
--{
-- /* If the skb does not have enough free space at the tail, get a new
-- * one plus 128 bytes for future expansions. If we have enough room
-- * at the tail, reallocate without expansion only if the skb is cloned.
-- */
-- int i, k, eat = (skb->tail + delta) - skb->end;
--
-- if (eat > 0 || skb_cloned(skb)) {
-- if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
-- GFP_ATOMIC))
-- return NULL;
-- }
--
-- if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
-- BUG();
--
-- /* Optimization: no fragments, no reason to pre-estimate the
-- * size of pulled pages. Superb.
-- */
-- if (!skb_shinfo(skb)->frag_list)
-- goto pull_pages;
--
-- /* Estimate size of pulled pages. */
-- eat = delta;
-- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-- if (skb_shinfo(skb)->frags[i].size >= eat)
-- goto pull_pages;
-- eat -= skb_shinfo(skb)->frags[i].size;
-- }
--
-- /* If we need to update the frag list, we are in trouble.
-- * Certainly, it is possible to add an offset to the skb data,
-- * but taking into account that pulling is expected to
-- * be a very rare operation, it is worth fighting against
-- * further bloating of the skb head and crucifying ourselves here instead.
-- * Pure masochism, indeed. 8)8)
-- */
-- if (eat) {
-- struct sk_buff *list = skb_shinfo(skb)->frag_list;
-- struct sk_buff *clone = NULL;
-- struct sk_buff *insp = NULL;
--
-- do {
-- BUG_ON(!list);
--
-- if (list->len <= eat) {
-- /* Eaten as whole. */
-- eat -= list->len;
-- list = list->next;
-- insp = list;
-- } else {
-- /* Eaten partially. */
--
-- if (skb_shared(list)) {
-- /* Sucks! We need to fork list. :-( */
-- clone = skb_clone(list, GFP_ATOMIC);
-- if (!clone)
-- return NULL;
-- insp = list->next;
-- list = clone;
-- } else {
-- /* This may be pulled without
-- * problems. */
-- insp = list;
-- }
-- if (!pskb_pull(list, eat)) {
-- if (clone)
-- kfree_skb(clone);
-- return NULL;
-- }
-- break;
-- }
-- } while (eat);
--
-- /* Free pulled out fragments. */
-- while ((list = skb_shinfo(skb)->frag_list) != insp) {
-- skb_shinfo(skb)->frag_list = list->next;
-- kfree_skb(list);
-- }
-- /* And insert new clone at head. */
-- if (clone) {
-- clone->next = list;
-- skb_shinfo(skb)->frag_list = clone;
-- }
-- }
-- /* Success! Now we may commit changes to skb data. */
--
--pull_pages:
-- eat = delta;
-- k = 0;
-- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-- if (skb_shinfo(skb)->frags[i].size <= eat) {
-- put_page(skb_shinfo(skb)->frags[i].page);
-- eat -= skb_shinfo(skb)->frags[i].size;
-- } else {
-- skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
-- if (eat) {
-- skb_shinfo(skb)->frags[k].page_offset += eat;
-- skb_shinfo(skb)->frags[k].size -= eat;
-- eat = 0;
-- }
-- k++;
-- }
-- }
-- skb_shinfo(skb)->nr_frags = k;
--
-- skb->tail += delta;
-- skb->data_len -= delta;
--
-- return skb_tail_pointer(skb);
--}
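--
--/* Usage sketch (editorial note, not part of the original source):
-- * callers normally reach __pskb_pull_tail() through pskb_may_pull(),
-- * and must reload every pointer into the header afterwards, since
-- * the head may have been reallocated:
-- *
-- *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
-- *		goto drop;
-- *	iph = ip_hdr(skb);	(reload: the head may have moved)
-- */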
--
--/* Copy some data bits from skb to kernel buffer. */
--
--int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
--{
-- int i, copy;
-- int start = skb_headlen(skb);
--
-- if (offset > (int)skb->len - len)
-- goto fault;
--
-- /* Copy header. */
-- if ((copy = start - offset) > 0) {
-- if (copy > len)
-- copy = len;
-- skb_copy_from_linear_data_offset(skb, offset, to, copy);
-- if ((len -= copy) == 0)
-- return 0;
-- offset += copy;
-- to += copy;
-- }
--
-- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-- int end;
--
-- WARN_ON(start > offset + len);
--
-- end = start + skb_shinfo(skb)->frags[i].size;
-- if ((copy = end - offset) > 0) {
-- u8 *vaddr;
--
-- if (copy > len)
-- copy = len;
--
-- vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
-- memcpy(to,
-- vaddr + skb_shinfo(skb)->frags[i].page_offset+
-- offset - start, copy);
-- kunmap_skb_frag(vaddr);
--
-- if ((len -= copy) == 0)
-- return 0;
-- offset += copy;
-- to += copy;
-- }
-- start = end;
-- }
--
-- if (skb_shinfo(skb)->frag_list) {
-- struct sk_buff *list = skb_shinfo(skb)->frag_list;
--
-- for (; list; list = list->next) {
-- int end;
--
-- WARN_ON(start > offset + len);
--
-- end = start + list->len;
-- if ((copy = end - offset) > 0) {
-- if (copy > len)
-- copy = len;
-- if (skb_copy_bits(list, offset - start,
-- to, copy))
-- goto fault;
-- if ((len -= copy) == 0)
-- return 0;
-- offset += copy;
-- to += copy;
-- }
-- start = end;
-- }
-- }
-- if (!len)
-- return 0;
--
--fault:
-- return -EFAULT;
--}
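--
--/* Usage sketch (editorial note): copy a possibly non-linear header
-- * into a stack buffer without disturbing the skb layout; the call
-- * fails only if the requested range falls outside skb->len:
-- *
-- *	struct udphdr uh;
-- *
-- *	if (skb_copy_bits(skb, skb_transport_offset(skb), &uh, sizeof(uh)))
-- *		goto drop;
-- */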
--
--/*
-- * Callback from splice_to_pipe(), if we need to release some pages
-- * at the end of the spd in case we errored out while filling the pipe.
-- */
--static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
--{
-- struct sk_buff *skb = (struct sk_buff *) spd->partial[i].private;
--
-- kfree_skb(skb);
--}
--
--/*
-- * Fill page/offset/length into spd, if it can hold more pages.
-- */
--static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
-- unsigned int len, unsigned int offset,
-- struct sk_buff *skb)
--{
-- if (unlikely(spd->nr_pages == PIPE_BUFFERS))
-- return 1;
--
-- spd->pages[spd->nr_pages] = page;
-- spd->partial[spd->nr_pages].len = len;
-- spd->partial[spd->nr_pages].offset = offset;
-- spd->partial[spd->nr_pages].private = (unsigned long) skb_get(skb);
-- spd->nr_pages++;
-- return 0;
--}
--
--static inline void __segment_seek(struct page **page, unsigned int *poff,
-- unsigned int *plen, unsigned int off)
--{
-- *poff += off;
-- *page += *poff / PAGE_SIZE;
-- *poff = *poff % PAGE_SIZE;
-- *plen -= off;
--}
--
--static inline int __splice_segment(struct page *page, unsigned int poff,
-- unsigned int plen, unsigned int *off,
-- unsigned int *len, struct sk_buff *skb,
-- struct splice_pipe_desc *spd)
--{
-- if (!*len)
-- return 1;
--
-- /* skip this segment if already processed */
-- if (*off >= plen) {
-- *off -= plen;
-- return 0;
-- }
--
-- /* ignore any bits we already processed */
-- if (*off) {
-- __segment_seek(&page, &poff, &plen, *off);
-- *off = 0;
-- }
--
-- do {
-- unsigned int flen = min(*len, plen);
--
-- /* the linear region may spread across several pages */
-- flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
--
-- if (spd_fill_page(spd, page, flen, poff, skb))
-- return 1;
--
-- __segment_seek(&page, &poff, &plen, flen);
-- *len -= flen;
--
-- } while (*len && plen);
--
-- return 0;
--}
--
--/*
-- * Map linear and fragment data from the skb to spd. It reports failure if the
-- * pipe is full or if we already spliced the requested length.
-- */
--static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
-- unsigned int *len,
-- struct splice_pipe_desc *spd)
--{
-- int seg;
--
-- /*
-- * map the linear part
-- */
-- if (__splice_segment(virt_to_page(skb->data),
-- (unsigned long) skb->data & (PAGE_SIZE - 1),
-- skb_headlen(skb),
-- offset, len, skb, spd))
-- return 1;
--
-- /*
-- * then map the fragments
-- */
-- for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
-- const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
--
-- if (__splice_segment(f->page, f->page_offset, f->size,
-- offset, len, skb, spd))
-- return 1;
-- }
--
-- return 0;
--}
--
--/*
-- * Map data from the skb to a pipe. Should handle the linear part,
-- * the fragments, and the frag list. It does NOT handle frag lists within
-- * the frag list, if such a thing exists. We'd probably need to recurse to
-- * handle that cleanly.
-- */
--int skb_splice_bits(struct sk_buff *__skb, unsigned int offset,
-- struct pipe_inode_info *pipe, unsigned int tlen,
-- unsigned int flags)
--{
-- struct partial_page partial[PIPE_BUFFERS];
-- struct page *pages[PIPE_BUFFERS];
-- struct splice_pipe_desc spd = {
-- .pages = pages,
-- .partial = partial,
-- .flags = flags,
-- .ops = &sock_pipe_buf_ops,
-- .spd_release = sock_spd_release,
-- };
-- struct sk_buff *skb;
--
-- /*
-- * I'd love to avoid the clone here, but tcp_read_sock()
-- * ignores reference counts and unconditionally kills the sk_buff
-- * on return from the actor.
-- */
-- skb = skb_clone(__skb, GFP_KERNEL);
-- if (unlikely(!skb))
-- return -ENOMEM;
--
-- /*
-- * __skb_splice_bits() only fails if the output has no room left,
-- * so no point in going over the frag_list for the error case.
-- */
-- if (__skb_splice_bits(skb, &offset, &tlen, &spd))
-- goto done;
-- else if (!tlen)
-- goto done;
--
-- /*
-- * now see if we have a frag_list to map
-- */
-- if (skb_shinfo(skb)->frag_list) {
-- struct sk_buff *list = skb_shinfo(skb)->frag_list;
--
-- for (; list && tlen; list = list->next) {
-- if (__skb_splice_bits(list, &offset, &tlen, &spd))
-- break;
-- }
-- }
--
--done:
-- /*
-- * drop our reference to the clone, the pipe consumption will
-- * drop the rest.
-- */
-- kfree_skb(skb);
--
-- if (spd.nr_pages) {
-- int ret;
-- struct sock *sk = __skb->sk;
--
-- /*
-- * Drop the socket lock, otherwise we have reverse
-- * locking dependencies between sk_lock and i_mutex
-- * here as compared to sendfile(). We enter here
-- * with the socket lock held, and splice_to_pipe() will
-- * grab the pipe inode lock. For sendfile() emulation,
-- * we call into ->sendpage() with the i_mutex lock held
-- * and networking will grab the socket lock.
-- */
-- release_sock(sk);
-- ret = splice_to_pipe(pipe, &spd);
-- lock_sock(sk);
-- return ret;
-- }
--
-- return 0;
--}
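--
--/* Usage sketch (editorial note): tcp_splice_read() reaches this
-- * function from a read_descriptor actor, roughly:
-- *
-- *	ret = skb_splice_bits(skb, offset, pipe,
-- *			      min(rd_desc->count, len), flags);
-- *
-- * The socket lock is held on entry and dropped only around
-- * splice_to_pipe(), as the comment above explains.
-- */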
--
--/**
-- * skb_store_bits - store bits from kernel buffer to skb
-- * @skb: destination buffer
-- * @offset: offset in destination
-- * @from: source buffer
-- * @len: number of bytes to copy
-- *
-- * Copy the specified number of bytes from the source buffer to the
-- * destination skb. This function handles all the messy bits of
-- * traversing fragment lists and such.
-- */
--
--int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
--{
-- int i, copy;
-- int start = skb_headlen(skb);
--
-- if (offset > (int)skb->len - len)
-- goto fault;
--
-- if ((copy = start - offset) > 0) {
-- if (copy > len)
-- copy = len;
-- skb_copy_to_linear_data_offset(skb, offset, from, copy);
-- if ((len -= copy) == 0)
-- return 0;
-- offset += copy;
-- from += copy;
-- }
--
-- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-- int end;
--
-- WARN_ON(start > offset + len);
--
-- end = start + frag->size;
-- if ((copy = end - offset) > 0) {
-- u8 *vaddr;
--
-- if (copy > len)
-- copy = len;
--
-- vaddr = kmap_skb_frag(frag);
-- memcpy(vaddr + frag->page_offset + offset - start,
-- from, copy);
-- kunmap_skb_frag(vaddr);
--
-- if ((len -= copy) == 0)
-- return 0;
-- offset += copy;
-- from += copy;
-- }
-- start = end;
-- }
--
-- if (skb_shinfo(skb)->frag_list) {
-- struct sk_buff *list = skb_shinfo(skb)->frag_list;
--
-- for (; list; list = list->next) {
-- int end;
--
-- WARN_ON(start > offset + len);
--
-- end = start + list->len;
-- if ((copy = end - offset) > 0) {
-- if (copy > len)
-- copy = len;
-- if (skb_store_bits(list, offset - start,
-- from, copy))
-- goto fault;
-- if ((len -= copy) == 0)
-- return 0;
-- offset += copy;
-- from += copy;
-- }
-- start = end;
-- }
-- }
-- if (!len)
-- return 0;
--
--fault:
-- return -EFAULT;
--}
--
--EXPORT_SYMBOL(skb_store_bits);
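--
--/* Usage sketch (editorial note): the destination range must already
-- * exist; skb_store_bits() only traverses, it never grows the skb,
-- * and a cloned skb must be un-shared first:
-- *
-- *	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-- *		goto drop;
-- *	if (skb_store_bits(skb, offset, buf, buflen))
-- *		goto drop;	(range outside skb->len)
-- */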
--
--/* Checksum skb data. */
--
--__wsum skb_checksum(const struct sk_buff *skb, int offset,
-- int len, __wsum csum)
--{
-- int start = skb_headlen(skb);
-- int i, copy = start - offset;
-- int pos = 0;
--
-- /* Checksum header. */
-- if (copy > 0) {
-- if (copy > len)
-- copy = len;
-- csum = csum_partial(skb->data + offset, copy, csum);
-- if ((len -= copy) == 0)
-- return csum;
-- offset += copy;
-- pos = copy;
-- }
--
-- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-- int end;
--
-- WARN_ON(start > offset + len);
--
-- end = start + skb_shinfo(skb)->frags[i].size;
-- if ((copy = end - offset) > 0) {
-- __wsum csum2;
-- u8 *vaddr;
-- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
--
-- if (copy > len)
-- copy = len;
-- vaddr = kmap_skb_frag(frag);
-- csum2 = csum_partial(vaddr + frag->page_offset +
-- offset - start, copy, 0);
-- kunmap_skb_frag(vaddr);
-- csum = csum_block_add(csum, csum2, pos);
-- if (!(len -= copy))
-- return csum;
-- offset += copy;
-- pos += copy;
-- }
-- start = end;
-- }
--
-- if (skb_shinfo(skb)->frag_list) {
-- struct sk_buff *list = skb_shinfo(skb)->frag_list;
--
-- for (; list; list = list->next) {
-- int end;
--
-- WARN_ON(start > offset + len);
--
-- end = start + list->len;
-- if ((copy = end - offset) > 0) {
-- __wsum csum2;
-- if (copy > len)
-- copy = len;
-- csum2 = skb_checksum(list, offset - start,
-- copy, 0);
-- csum = csum_block_add(csum, csum2, pos);
-- if ((len -= copy) == 0)
-- return csum;
-- offset += copy;
-- pos += copy;
-- }
-- start = end;
-- }
-- }
-- BUG_ON(len);
--
-- return csum;
--}
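--
--/* Usage sketch (editorial note): receive paths fold a full-packet
-- * checksum, seeding it with a pseudo-header sum, e.g. for UDP:
-- *
-- *	__wsum psum = csum_tcpudp_nofold(saddr, daddr, skb->len,
-- *					 IPPROTO_UDP, 0);
-- *
-- *	if (csum_fold(skb_checksum(skb, 0, skb->len, psum)))
-- *		goto csum_error;
-- */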
--
--/* Both of above in one bottle. */
--
--__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
-- u8 *to, int len, __wsum csum)
--{
-- int start = skb_headlen(skb);
-- int i, copy = start - offset;
-- int pos = 0;
--
-- /* Copy header. */
-- if (copy > 0) {
-- if (copy > len)
-- copy = len;
-- csum = csum_partial_copy_nocheck(skb->data + offset, to,
-- copy, csum);
-- if ((len -= copy) == 0)
-- return csum;
-- offset += copy;
-- to += copy;
-- pos = copy;
-- }
--
-- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-- int end;
--
-- WARN_ON(start > offset + len);
--
-- end = start + skb_shinfo(skb)->frags[i].size;
-- if ((copy = end - offset) > 0) {
-- __wsum csum2;
-- u8 *vaddr;
-- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
--
-- if (copy > len)
-- copy = len;
-- vaddr = kmap_skb_frag(frag);
-- csum2 = csum_partial_copy_nocheck(vaddr +
-- frag->page_offset +
-- offset - start, to,
-- copy, 0);
-- kunmap_skb_frag(vaddr);
-- csum = csum_block_add(csum, csum2, pos);
-- if (!(len -= copy))
-- return csum;
-- offset += copy;
-- to += copy;
-- pos += copy;
-- }
-- start = end;
-- }
--
-- if (skb_shinfo(skb)->frag_list) {
-- struct sk_buff *list = skb_shinfo(skb)->frag_list;
--
-- for (; list; list = list->next) {
-- __wsum csum2;
-- int end;
--
-- WARN_ON(start > offset + len);
--
-- end = start + list->len;
-- if ((copy = end - offset) > 0) {
-- if (copy > len)
-- copy = len;
-- csum2 = skb_copy_and_csum_bits(list,
-- offset - start,
-- to, copy, 0);
-- csum = csum_block_add(csum, csum2, pos);
-- if ((len -= copy) == 0)
-- return csum;
-- offset += copy;
-- to += copy;
-- pos += copy;
-- }
-- start = end;
-- }
-- }
-- BUG_ON(len);
-- return csum;
--}
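--
--/* Usage sketch (editorial note): icmp_glue_bits()-style callers copy
-- * a payload slice while accumulating its checksum in one pass:
-- *
-- *	csum = skb_copy_and_csum_bits(skb_in, offset, to, fraglen, 0);
-- */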
--
--void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
--{
-- __wsum csum;
-- long csstart;
--
-- if (skb->ip_summed == CHECKSUM_PARTIAL)
-- csstart = skb->csum_start - skb_headroom(skb);
-- else
-- csstart = skb_headlen(skb);
--
-- BUG_ON(csstart > skb_headlen(skb));
--
-- skb_copy_from_linear_data(skb, to, csstart);
--
-- csum = 0;
-- if (csstart != skb->len)
-- csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
-- skb->len - csstart, 0);
--
-- if (skb->ip_summed == CHECKSUM_PARTIAL) {
-- long csstuff = csstart + skb->csum_offset;
--
-- *((__sum16 *)(to + csstuff)) = csum_fold(csum);
-- }
--}
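--
--/* Usage sketch (editorial note): typical in the xmit path of a
-- * driver without scatter-gather support, copying the frame into a
-- * bounce buffer ("txb" is hypothetical) while filling in a pending
-- * CHECKSUM_PARTIAL checksum:
-- *
-- *	skb_copy_and_csum_dev(skb, txb->data);
-- *	txb->len = skb->len;
-- */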
--
--/**
-- * skb_dequeue - remove from the head of the queue
-- * @list: list to dequeue from
-- *
-- * Remove the head of the list. The list lock is taken so the function
-- * may be used safely with other locking list functions. The head item is
-- * returned or %NULL if the list is empty.
-- */
--
--struct sk_buff *skb_dequeue(struct sk_buff_head *list)
--{
-- unsigned long flags;
-- struct sk_buff *result;
--
-- spin_lock_irqsave(&list->lock, flags);
-- result = __skb_dequeue(list);
-- spin_unlock_irqrestore(&list->lock, flags);
-- return result;
--}
--
--/**
-- * skb_dequeue_tail - remove from the tail of the queue
-- * @list: list to dequeue from
-- *
-- * Remove the tail of the list. The list lock is taken so the function
-- * may be used safely with other locking list functions. The tail item is
-- * returned or %NULL if the list is empty.
-- */
--struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
--{
-- unsigned long flags;
-- struct sk_buff *result;
--
-- spin_lock_irqsave(&list->lock, flags);
-- result = __skb_dequeue_tail(list);
-- spin_unlock_irqrestore(&list->lock, flags);
-- return result;
--}
--
--/**
-- * skb_queue_purge - empty a list
-- * @list: list to empty
-- *
-- * Delete all buffers on an &sk_buff list. Each buffer is removed from
-- * the list and one reference dropped. This function takes the list
-- * lock and is atomic with respect to other list locking functions.
-- */
--void skb_queue_purge(struct sk_buff_head *list)
--{
-- struct sk_buff *skb;
-- while ((skb = skb_dequeue(list)) != NULL)
-- kfree_skb(skb);
--}
--
--/**
-- * skb_queue_head - queue a buffer at the list head
-- * @list: list to use
-- * @newsk: buffer to queue
-- *
-- * Queue a buffer at the start of the list. This function takes the
-- * list lock and can be used safely with other locking &sk_buff
-- * functions.
-- *
-- * A buffer cannot be placed on two lists at the same time.
-- */
--void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
--{
-- unsigned long flags;
--
-- spin_lock_irqsave(&list->lock, flags);
-- __skb_queue_head(list, newsk);
-- spin_unlock_irqrestore(&list->lock, flags);
--}
--
--/**
-- * skb_queue_tail - queue a buffer at the list tail
-- * @list: list to use
-- * @newsk: buffer to queue
-- *
-- * Queue a buffer at the tail of the list. This function takes the
-- * list lock and can be used safely with other locking &sk_buff
-- * functions.
-- *
-- * A buffer cannot be placed on two lists at the same time.
-- */
--void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
--{
-- unsigned long flags;
--
-- spin_lock_irqsave(&list->lock, flags);
-- __skb_queue_tail(list, newsk);
-- spin_unlock_irqrestore(&list->lock, flags);
--}
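--
--/* Usage sketch (editorial note): the classic producer/consumer
-- * pairing on a driver-private queue ("priv" is hypothetical); it is
-- * safe from both process and IRQ context because the list lock is
-- * taken with IRQs disabled:
-- *
-- *	skb_queue_head_init(&priv->rxq);
-- *	...
-- *	skb_queue_tail(&priv->rxq, skb);	(producer)
-- *	...
-- *	while ((skb = skb_dequeue(&priv->rxq)) != NULL)	(consumer)
-- *		netif_rx(skb);
-- */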
--
--/**
-- * skb_unlink - remove a buffer from a list
-- * @skb: buffer to remove
-- * @list: list to use
-- *
-- * Remove a packet from a list. The list locks are taken and this
-- * function is atomic with respect to other list locked calls.
-- *
-- * You must know what list the SKB is on.
-- */
--void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
--{
-- unsigned long flags;
--
-- spin_lock_irqsave(&list->lock, flags);
-- __skb_unlink(skb, list);
-- spin_unlock_irqrestore(&list->lock, flags);
--}
--
--/**
-- * skb_append - append a buffer
-- * @old: buffer to insert after
-- * @newsk: buffer to insert
-- * @list: list to use
-- *
-- * Place a packet after a given packet in a list. The list locks are taken
-- * and this function is atomic with respect to other list locked calls.
-- * A buffer cannot be placed on two lists at the same time.
-- */
--void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
--{
-- unsigned long flags;
--
-- spin_lock_irqsave(&list->lock, flags);
-- __skb_queue_after(list, old, newsk);
-- spin_unlock_irqrestore(&list->lock, flags);
--}
--
--
--/**
-- * skb_insert - insert a buffer
-- * @old: buffer to insert before
-- * @newsk: buffer to insert
-- * @list: list to use
-- *
-- * Place a packet before a given packet in a list. The list locks are
-- * taken and this function is atomic with respect to other list locked
-- * calls.
-- *
-- * A buffer cannot be placed on two lists at the same time.
-- */
--void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
--{
-- unsigned long flags;
--
-- spin_lock_irqsave(&list->lock, flags);
-- __skb_insert(newsk, old->prev, old, list);
-- spin_unlock_irqrestore(&list->lock, flags);
--}
--
--static inline void skb_split_inside_header(struct sk_buff *skb,
-- struct sk_buff* skb1,
-- const u32 len, const int pos)
--{
-- int i;
--
-- skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
-- pos - len);
-- /* And move data appendix as is. */
-- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-- skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
--
-- skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
-- skb_shinfo(skb)->nr_frags = 0;
-- skb1->data_len = skb->data_len;
-- skb1->len += skb1->data_len;
-- skb->data_len = 0;
-- skb->len = len;
-- skb_set_tail_pointer(skb, len);
--}
--
--static inline void skb_split_no_header(struct sk_buff *skb,
-- struct sk_buff* skb1,
-- const u32 len, int pos)
--{
-- int i, k = 0;
-- const int nfrags = skb_shinfo(skb)->nr_frags;
--
-- skb_shinfo(skb)->nr_frags = 0;
-- skb1->len = skb1->data_len = skb->len - len;
-- skb->len = len;
-- skb->data_len = len - pos;
--
-- for (i = 0; i < nfrags; i++) {
-- int size = skb_shinfo(skb)->frags[i].size;
--
-- if (pos + size > len) {
-- skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
--
-- if (pos < len) {
-- /* Split frag.
-- * We have two variants in this case:
-- * 1. Move the whole frag to the second
-- * part, if it is possible. E.g. this
-- * approach is mandatory for TUX,
-- * where splitting is expensive.
-- * 2. Split accurately. This is what we do.
-- */
-- get_page(skb_shinfo(skb)->frags[i].page);
-- skb_shinfo(skb1)->frags[0].page_offset += len - pos;
-- skb_shinfo(skb1)->frags[0].size -= len - pos;
-- skb_shinfo(skb)->frags[i].size = len - pos;
-- skb_shinfo(skb)->nr_frags++;
-- }
-- k++;
-- } else
-- skb_shinfo(skb)->nr_frags++;
-- pos += size;
-- }
-- skb_shinfo(skb1)->nr_frags = k;
--}
--
--/**
-- * skb_split - Split fragmented skb to two parts at length len.
-- * @skb: the buffer to split
-- * @skb1: the buffer to receive the second part
-- * @len: new length for skb
-- */
--void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
--{
-- int pos = skb_headlen(skb);
--
-- if (len < pos) /* Split line is inside header. */
-- skb_split_inside_header(skb, skb1, len, pos);
-- else /* Second chunk has no header, nothing to copy. */
-- skb_split_no_header(skb, skb1, len, pos);
--}
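--
--/* Usage sketch (editorial note): TSO-style callers split an
-- * over-sized buffer in two; @skb1 must be freshly allocated, with
-- * enough linear room for the worst case of a split inside the
-- * header:
-- *
-- *	buff = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
-- *	if (!buff)
-- *		return -ENOMEM;
-- *	skb_split(skb, buff, mss);	(skb keeps the first mss bytes)
-- */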
--
--/**
-- * skb_prepare_seq_read - Prepare a sequential read of skb data
-- * @skb: the buffer to read
-- * @from: lower offset of data to be read
-- * @to: upper offset of data to be read
-- * @st: state variable
-- *
-- * Initializes the specified state variable. Must be called before
-- * invoking skb_seq_read() for the first time.
-- */
--void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
-- unsigned int to, struct skb_seq_state *st)
--{
-- st->lower_offset = from;
-- st->upper_offset = to;
-- st->root_skb = st->cur_skb = skb;
-- st->frag_idx = st->stepped_offset = 0;
-- st->frag_data = NULL;
--}
--
--/**
-- * skb_seq_read - Sequentially read skb data
-- * @consumed: number of bytes consumed by the caller so far
-- * @data: destination pointer for data to be returned
-- * @st: state variable
-- *
-- * Reads a block of skb data at &consumed relative to the
-- * lower offset specified to skb_prepare_seq_read(). Assigns
-- * the head of the data block to &data and returns the length
-- * of the block or 0 if the end of the skb data or the upper
-- * offset has been reached.
-- *
-- * The caller is not required to consume all of the data
-- * returned, i.e. &consumed is typically set to the number
-- * of bytes already consumed and the next call to
-- * skb_seq_read() will return the remaining part of the block.
-- *
-- * Note 1: The size of each block of data returned can be arbitrary;
-- * this limitation is the cost of zero-copy sequential
-- * reads of potentially non-linear data.
-- *
-- * Note 2: Fragment lists within fragments are not implemented
-- * at the moment, state->root_skb could be replaced with
-- * a stack for this purpose.
-- */
--unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
-- struct skb_seq_state *st)
--{
-- unsigned int block_limit, abs_offset = consumed + st->lower_offset;
-- skb_frag_t *frag;
--
-- if (unlikely(abs_offset >= st->upper_offset))
-- return 0;
--
--next_skb:
-- block_limit = skb_headlen(st->cur_skb);
--
-- if (abs_offset < block_limit) {
-- *data = st->cur_skb->data + abs_offset;
-- return block_limit - abs_offset;
-- }
--
-- if (st->frag_idx == 0 && !st->frag_data)
-- st->stepped_offset += skb_headlen(st->cur_skb);
--
-- while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
-- frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
-- block_limit = frag->size + st->stepped_offset;
--
-- if (abs_offset < block_limit) {
-- if (!st->frag_data)
-- st->frag_data = kmap_skb_frag(frag);
--
-- *data = (u8 *) st->frag_data + frag->page_offset +
-- (abs_offset - st->stepped_offset);
--
-- return block_limit - abs_offset;
-- }
--
-- if (st->frag_data) {
-- kunmap_skb_frag(st->frag_data);
-- st->frag_data = NULL;
-- }
--
-- st->frag_idx++;
-- st->stepped_offset += frag->size;
-- }
--
-- if (st->frag_data) {
-- kunmap_skb_frag(st->frag_data);
-- st->frag_data = NULL;
-- }
--
-- if (st->cur_skb->next) {
-- st->cur_skb = st->cur_skb->next;
-- st->frag_idx = 0;
-- goto next_skb;
-- } else if (st->root_skb == st->cur_skb &&
-- skb_shinfo(st->root_skb)->frag_list) {
-- st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
-- goto next_skb;
-- }
--
-- return 0;
--}
--
--/**
-- * skb_abort_seq_read - Abort a sequential read of skb data
-- * @st: state variable
-- *
-- * Must be called if the read was abandoned, i.e. if skb_seq_read()
-- * was not called repeatedly until it returned 0.
-- */
--void skb_abort_seq_read(struct skb_seq_state *st)
--{
-- if (st->frag_data)
-- kunmap_skb_frag(st->frag_data);
--}
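--
--/* Usage sketch (editorial note): a zero-copy walk over [from, to) of
-- * a possibly non-linear skb ("scan" is a hypothetical consumer):
-- *
-- *	struct skb_seq_state st;
-- *	const u8 *data;
-- *	unsigned int consumed = 0, avail;
-- *
-- *	skb_prepare_seq_read(skb, from, to, &st);
-- *	while ((avail = skb_seq_read(consumed, &data, &st)) != 0)
-- *		consumed += scan(data, avail);
-- *
-- * skb_abort_seq_read() is only needed if the loop stops before
-- * skb_seq_read() has returned 0.
-- */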
--
--#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
--
--static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
-- struct ts_config *conf,
-- struct ts_state *state)
--{
-- return skb_seq_read(offset, text, TS_SKB_CB(state));
--}
--
--static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
--{
-- skb_abort_seq_read(TS_SKB_CB(state));
--}
--
--/**
-- * skb_find_text - Find a text pattern in skb data
-- * @skb: the buffer to look in
-- * @from: search offset
-- * @to: search limit
-- * @config: textsearch configuration
-- * @state: uninitialized textsearch state variable
-- *
-- * Finds a pattern in the skb data according to the specified
-- * textsearch configuration. Use textsearch_next() to retrieve
-- * subsequent occurrences of the pattern. Returns the offset
-- * to the first occurrence or UINT_MAX if no match was found.
-- */
--unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
-- unsigned int to, struct ts_config *config,
-- struct ts_state *state)
--{
-- unsigned int ret;
--
-- config->get_next_block = skb_ts_get_next_block;
-- config->finish = skb_ts_finish;
--
-- skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
--
-- ret = textsearch_find(config, state);
-- return (ret <= to - from ? ret : UINT_MAX);
--}
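--
--/* Usage sketch (editorial note), mirroring netfilter's xt_string
-- * match: prepare a textsearch config once, then search any skb:
-- *
-- *	struct ts_config *conf;
-- *	struct ts_state state;
-- *	unsigned int pos;
-- *
-- *	conf = textsearch_prepare("bm", "GET ", 4, GFP_KERNEL,
-- *				  TS_AUTOLOAD);
-- *	if (IS_ERR(conf))
-- *		return PTR_ERR(conf);
-- *	pos = skb_find_text(skb, 0, skb->len, conf, &state);
-- *	textsearch_destroy(conf);	(pos == UINT_MAX means no match)
-- */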
--
--/**
-- * skb_append_datato_frags: - append the user data to a skb
-- * @sk: sock structure
-- * @skb: skb structure to be appended with user data.
-- * @getfrag: callback function to be used for getting the user data
-- * @from: pointer to user message iov
-- * @length: length of the iov message
-- *
-- * Description: This procedure appends the user data to the fragment part
-- * of the skb. If any page allocation fails, it returns -ENOMEM.
-- */
--int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
-- int (*getfrag)(void *from, char *to, int offset,
-- int len, int odd, struct sk_buff *skb),
-- void *from, int length)
--{
-- int frg_cnt = 0;
-- skb_frag_t *frag = NULL;
-- struct page *page = NULL;
-- int copy, left;
-- int offset = 0;
-- int ret;
--
-- do {
-- /* Return error if we don't have space for new frag */
-- frg_cnt = skb_shinfo(skb)->nr_frags;
-- if (frg_cnt >= MAX_SKB_FRAGS)
-- return -EFAULT;
--
-- /* allocate a new page for next frag */
-- page = alloc_pages(sk->sk_allocation, 0);
--
-- /* If alloc_pages() fails, just return failure; the caller will
-- * free the previously allocated pages by doing kfree_skb().
-- */
-- if (page == NULL)
-- return -ENOMEM;
--
-- /* initialize the next frag */
-- sk->sk_sndmsg_page = page;
-- sk->sk_sndmsg_off = 0;
-- skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
-- skb->truesize += PAGE_SIZE;
-- atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
--
-- /* get the new initialized frag */
-- frg_cnt = skb_shinfo(skb)->nr_frags;
-- frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
--
-- /* copy the user data to page */
-- left = PAGE_SIZE - frag->page_offset;
-- copy = (length > left)? left : length;
--
-- ret = getfrag(from, (page_address(frag->page) +
-- frag->page_offset + frag->size),
-- offset, copy, 0, skb);
-- if (ret < 0)
-- return -EFAULT;
--
-- /* copy was successful so update the size parameters */
-- sk->sk_sndmsg_off += copy;
-- frag->size += copy;
-- skb->len += copy;
-- skb->data_len += copy;
-- offset += copy;
-- length -= copy;
--
-- } while (length > 0);
--
-- return 0;
--}
--
--/**
-- * skb_pull_rcsum - pull skb and update receive checksum
-- * @skb: buffer to update
-- * @len: length of data pulled
-- *
-- * This function performs an skb_pull on the packet and updates
-- * the CHECKSUM_COMPLETE checksum. It should be used on
-- * receive path processing instead of skb_pull unless you know
-- * that the checksum difference is zero (e.g., a valid IP header)
-- * or you are setting ip_summed to CHECKSUM_NONE.
-- */
--unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
--{
-- BUG_ON(len > skb->len);
-- skb->len -= len;
-- BUG_ON(skb->len < skb->data_len);
-- skb_postpull_rcsum(skb, skb->data, len);
-- return skb->data += len;
--}
--
--EXPORT_SYMBOL_GPL(skb_pull_rcsum);
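--
--/* Usage sketch (editorial note): receive-path decapsulation, popping
-- * an encapsulation header of HDRLEN bytes (HDRLEN is hypothetical)
-- * while keeping a CHECKSUM_COMPLETE value valid:
-- *
-- *	if (!pskb_may_pull(skb, HDRLEN))
-- *		goto drop;
-- *	skb_pull_rcsum(skb, HDRLEN);
-- */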
--
--/**
-- * skb_segment - Perform protocol segmentation on skb.
-- * @skb: buffer to segment
-- * @features: features for the output path (see dev->features)
-- *
-- * This function performs segmentation on the given skb. It returns
-- * a pointer to the first in a list of new skbs for the segments.
-- * In case of error it returns ERR_PTR(err).
-- */
--struct sk_buff *skb_segment(struct sk_buff *skb, int features)
--{
-- struct sk_buff *segs = NULL;
-- struct sk_buff *tail = NULL;
-- unsigned int mss = skb_shinfo(skb)->gso_size;
-- unsigned int doffset = skb->data - skb_mac_header(skb);
-- unsigned int offset = doffset;
-- unsigned int headroom;
-- unsigned int len;
-- int sg = features & NETIF_F_SG;
-- int nfrags = skb_shinfo(skb)->nr_frags;
-- int err = -ENOMEM;
-- int i = 0;
-- int pos;
--
-- __skb_push(skb, doffset);
-- headroom = skb_headroom(skb);
-- pos = skb_headlen(skb);
--
-- do {
-- struct sk_buff *nskb;
-- skb_frag_t *frag;
-- int hsize;
-- int k;
-- int size;
--
-- len = skb->len - offset;
-- if (len > mss)
-- len = mss;
--
-- hsize = skb_headlen(skb) - offset;
-- if (hsize < 0)
-- hsize = 0;
-- if (hsize > len || !sg)
-- hsize = len;
--
-- nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
-- if (unlikely(!nskb))
-- goto err;
--
-- if (segs)
-- tail->next = nskb;
-- else
-- segs = nskb;
-- tail = nskb;
--
-- __copy_skb_header(nskb, skb);
-- nskb->mac_len = skb->mac_len;
--
-- skb_reserve(nskb, headroom);
-- skb_reset_mac_header(nskb);
-- skb_set_network_header(nskb, skb->mac_len);
-- nskb->transport_header = (nskb->network_header +
-- skb_network_header_len(skb));
-- skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
-- doffset);
-- if (!sg) {
-- nskb->ip_summed = CHECKSUM_NONE;
-- nskb->csum = skb_copy_and_csum_bits(skb, offset,
-- skb_put(nskb, len),
-- len, 0);
-- continue;
-- }
--
-- frag = skb_shinfo(nskb)->frags;
-- k = 0;
--
-- skb_copy_from_linear_data_offset(skb, offset,
-- skb_put(nskb, hsize), hsize);
--
-- while (pos < offset + len) {
-- BUG_ON(i >= nfrags);
--
-- *frag = skb_shinfo(skb)->frags[i];
-- get_page(frag->page);
-- size = frag->size;
--
-- if (pos < offset) {
-- frag->page_offset += offset - pos;
-- frag->size -= offset - pos;
-- }
--
-- k++;
--
-- if (pos + size <= offset + len) {
-- i++;
-- pos += size;
-- } else {
-- frag->size -= pos + size - (offset + len);
-- break;
-- }
--
-- frag++;
-- }
--
-- skb_shinfo(nskb)->nr_frags = k;
-- nskb->data_len = len - hsize;
-- nskb->len += nskb->data_len;
-- nskb->truesize += nskb->data_len;
-- } while ((offset += len) < skb->len);
--
-- return segs;
--
--err:
-- while ((skb = segs)) {
-- segs = skb->next;
-- kfree_skb(skb);
-- }
-- return ERR_PTR(err);
--}
--
--EXPORT_SYMBOL_GPL(skb_segment);
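--
--/* Usage sketch (editorial note): callers walk the returned singly
-- * linked list, unlinking each segment before handing it on
-- * ("xmit_one" is hypothetical):
-- *
-- *	segs = skb_segment(skb, dev->features);
-- *	if (IS_ERR(segs))
-- *		goto drop;
-- *	while (segs) {
-- *		struct sk_buff *nskb = segs;
-- *
-- *		segs = nskb->next;
-- *		nskb->next = NULL;
-- *		xmit_one(dev, nskb);
-- *	}
-- */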
--
--void __init skb_init(void)
--{
-- skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
-- sizeof(struct sk_buff),
-- 0,
-- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
-- NULL);
-- skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
-- (2*sizeof(struct sk_buff)) +
-- sizeof(atomic_t),
-- 0,
-- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
-- NULL);
--}
--
--/**
-- * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
-- * @skb: Socket buffer containing the buffers to be mapped
-- * @sg: The scatter-gather list to map into
-- * @offset: The offset into the buffer's contents to start mapping
-- * @len: Length of buffer space to be mapped
-- *
-- * Fill the specified scatter-gather list with mappings/pointers into a
-- * region of the buffer space attached to a socket buffer.
-- */
--static int
--__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
--{
-- int start = skb_headlen(skb);
-- int i, copy = start - offset;
-- int elt = 0;
--
-- if (copy > 0) {
-- if (copy > len)
-- copy = len;
-- sg_set_buf(sg, skb->data + offset, copy);
-- elt++;
-- if ((len -= copy) == 0)
-- return elt;
-- offset += copy;
-- }
--
-- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-- int end;
--
-- WARN_ON(start > offset + len);
--
-- end = start + skb_shinfo(skb)->frags[i].size;
-- if ((copy = end - offset) > 0) {
-- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
--
-- if (copy > len)
-- copy = len;
-- sg_set_page(&sg[elt], frag->page, copy,
-- frag->page_offset+offset-start);
-- elt++;
-- if (!(len -= copy))
-- return elt;
-- offset += copy;
-- }
-- start = end;
-- }
--
-- if (skb_shinfo(skb)->frag_list) {
-- struct sk_buff *list = skb_shinfo(skb)->frag_list;
--
-- for (; list; list = list->next) {
-- int end;
--
-- WARN_ON(start > offset + len);
--
-- end = start + list->len;
-- if ((copy = end - offset) > 0) {
-- if (copy > len)
-- copy = len;
-- elt += __skb_to_sgvec(list, sg+elt, offset - start,
-- copy);
-- if ((len -= copy) == 0)
-- return elt;
-- offset += copy;
-- }
-- start = end;
-- }
-- }
-- BUG_ON(len);
-- return elt;
--}
--
--int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
--{
-- int nsg = __skb_to_sgvec(skb, sg, offset, len);
--
-- sg_mark_end(&sg[nsg - 1]);
--
-- return nsg;
--}
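--
--/* Usage sketch (editorial note): IPsec-style callers size the
-- * scatterlist from skb_cow_data()'s return value, initialize it,
-- * then map the region:
-- *
-- *	nsg = skb_cow_data(skb, 0, &trailer);
-- *	if (nsg < 0)
-- *		goto error;
-- *	sg = kmalloc(nsg * sizeof(*sg), GFP_ATOMIC);
-- *	sg_init_table(sg, nsg);
-- *	skb_to_sgvec(skb, sg, offset, len);
-- */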
--
--/**
-- * skb_cow_data - Check that a socket buffer's data buffers are writable
-- * @skb: The socket buffer to check.
-- * @tailbits: Amount of trailing space to be added
-- * @trailer: Returned pointer to the skb where the @tailbits space begins
-- *
-- * Make sure that the data buffers attached to a socket buffer are
-- * writable. If they are not, private copies are made of the data buffers
-- * and the socket buffer is set to use these instead.
-- *
-- * If @tailbits is given, make sure that there is space to write @tailbits
-- * bytes of data beyond current end of socket buffer. @trailer will be
-- * set to point to the skb in which this space begins.
-- *
-- * The number of scatterlist elements required to completely map the
-- * COW'd and extended socket buffer will be returned.
-- */
--int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
--{
-- int copyflag;
-- int elt;
-- struct sk_buff *skb1, **skb_p;
--
-- /* If skb is cloned or its head is paged, reallocate
-- * head pulling out all the pages (pages are considered not writable
-- * at the moment even if they are anonymous).
-- */
-- if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
-- __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
-- return -ENOMEM;
--
-- /* Easy case. Most of packets will go this way. */
-- if (!skb_shinfo(skb)->frag_list) {
-- /* A little trouble: not enough space for the trailer.
-- * This should not happen when the stack is tuned to generate
-- * good frames. OK, on a miss we reallocate and reserve even more
-- * space; 128 bytes is fair. */
--
-- if (skb_tailroom(skb) < tailbits &&
-- pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
-- return -ENOMEM;
--
-- /* Voila! */
-- *trailer = skb;
-- return 1;
-- }
--
-- /* Misery. We are in trouble, going to mince fragments... */
--
-- elt = 1;
-- skb_p = &skb_shinfo(skb)->frag_list;
-- copyflag = 0;
--
-- while ((skb1 = *skb_p) != NULL) {
-- int ntail = 0;
--
-- /* The fragment is partially pulled by someone,
-- * this can happen on input. Copy it and everything
-- * after it. */
--
-- if (skb_shared(skb1))
-- copyflag = 1;
--
-- /* If the skb is the last, worry about trailer. */
--
-- if (skb1->next == NULL && tailbits) {
-- if (skb_shinfo(skb1)->nr_frags ||
-- skb_shinfo(skb1)->frag_list ||
-- skb_tailroom(skb1) < tailbits)
-- ntail = tailbits + 128;
-- }
--
-- if (copyflag ||
-- skb_cloned(skb1) ||
-- ntail ||
-- skb_shinfo(skb1)->nr_frags ||
-- skb_shinfo(skb1)->frag_list) {
-- struct sk_buff *skb2;
--
-- /* Fuck, we are miserable poor guys... */
-- if (ntail == 0)
-- skb2 = skb_copy(skb1, GFP_ATOMIC);
-- else
-- skb2 = skb_copy_expand(skb1,
-- skb_headroom(skb1),
-- ntail,
-- GFP_ATOMIC);
-- if (unlikely(skb2 == NULL))
-- return -ENOMEM;
--
-- if (skb1->sk)
-- skb_set_owner_w(skb2, skb1->sk);
--
-- /* Looking around. Are we still alive?
-- * OK, link new skb, drop old one */
--
-- skb2->next = skb1->next;
-- *skb_p = skb2;
-- kfree_skb(skb1);
-- skb1 = skb2;
-- }
-- elt++;
-- *trailer = skb1;
-- skb_p = &skb1->next;
-- }
--
-- return elt;
--}
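--
--/* Usage sketch (editorial note): the ESP output path pairs this with
-- * skb_to_sgvec(); the return value sizes the scatterlist and
-- * @trailer receives the skb with room for the trailer bytes:
-- *
-- *	nsg = skb_cow_data(skb, trailer_len, &trailer);
-- *	if (nsg < 0)
-- *		goto error;
-- *	tail = skb_tail_pointer(trailer);	(trailer_len bytes writable)
-- */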
--
--/**
-- * skb_partial_csum_set - set up and verify partial csum values for packet
-- * @skb: the skb to set
-- * @start: the number of bytes after skb->data to start checksumming.
-- * @off: the offset from start to place the checksum.
-- *
-- * For untrusted partially-checksummed packets, we need to make sure the values
-- * for skb->csum_start and skb->csum_offset are valid so we don't oops.
-- *
-- * This function checks and sets those values and skb->ip_summed: if this
-- * returns false you should drop the packet.
-- */
--bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
--{
-- if (unlikely(start > skb->len - 2) ||
-- unlikely((int)start + off > skb->len - 2)) {
-- if (net_ratelimit())
-- printk(KERN_WARNING
-- "bad partial csum: csum=%u/%u len=%u\n",
-- start, off, skb->len);
-- return false;
-- }
-- skb->ip_summed = CHECKSUM_PARTIAL;
-- skb->csum_start = skb_headroom(skb) + start;
-- skb->csum_offset = off;
-- return true;
--}
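--
--/* Usage sketch (editorial note): a virtio-net-style receive path
-- * validates guest-supplied checksum metadata ("hdr" being the
-- * device's per-packet header) before accepting the packet:
-- *
-- *	if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset))
-- *		goto frame_err;
-- */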
--
--void __skb_warn_lro_forwarding(const struct sk_buff *skb)