X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Flinux%2Fsched.h;fp=include%2Flinux%2Fsched.h;h=901154fe8c1deea2df8a97ceedc6d4e18e570d2c;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=45a8e67e99b9edbad8fe5787c8c6f1dfffda11bf;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 45a8e67e9..901154fe8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1,46 +1,9 @@
 #ifndef _LINUX_SCHED_H
 #define _LINUX_SCHED_H
 
-#include <linux/auxvec.h>	/* For AT_VECTOR_SIZE */
-
-/*
- * cloning flags:
- */
-#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
-#define CLONE_VM	0x00000100	/* set if VM shared between processes */
-#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
-#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
-#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
-#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
-#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
-#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
-#define CLONE_THREAD	0x00010000	/* Same thread group? */
-#define CLONE_NEWNS	0x00020000	/* New namespace group? */
-#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
-#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
-#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
-#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
-#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
-#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
-#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
-#define CLONE_STOPPED		0x02000000	/* Start in stopped state */
-
-/*
- * Scheduling policies
- */
-#define SCHED_NORMAL	0
-#define SCHED_FIFO	1
-#define SCHED_RR	2
-#define SCHED_BATCH	3
-
-#ifdef __KERNEL__
-
-struct sched_param {
-	int sched_priority;
-};
-
 #include <asm/param.h>	/* for HZ */
+#include
 
 #include
 #include
 #include
@@ -73,21 +36,32 @@ struct sched_param {
 #include
 #include
 #include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
+#include <linux/auxvec.h>	/* For AT_VECTOR_SIZE */
 
 struct exec_domain;
 
-struct futex_pi_state;
-extern int exec_shield;
-extern int print_fatal_signals;
+
+/*
+ * cloning flags:
+ */
+#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
+#define CLONE_VM	0x00000100	/* set if VM shared between processes */
+#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
+#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
+#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
+#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
+#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
+#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
+#define CLONE_THREAD	0x00010000	/* Same thread group? */
+#define CLONE_NEWNS	0x00020000	/* New namespace group? */
+#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
+#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
+#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
+#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
+#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
+#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
+#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
+#define CLONE_STOPPED		0x02000000	/* Start in stopped state */
 
 /*
  * List of flags we want to share for kernel threads,
@@ -126,10 +100,15 @@ DECLARE_PER_CPU(unsigned long, process_counts);
 extern int nr_processes(void);
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
-extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
-extern unsigned long weighted_cpuload(const int cpu);
 
+#include
+#include
+#include
+#include
+#include
+
+#include
 
 /*
  * Task state bitmask. NOTE! These bits are also
@@ -177,6 +156,20 @@ extern unsigned long weighted_cpuload(const int cpu);
 /* Task command name length */
 #define TASK_COMM_LEN 16
 
+/*
+ * Scheduling policies
+ */
+#define SCHED_NORMAL	0
+#define SCHED_FIFO	1
+#define SCHED_RR	2
+#define SCHED_BATCH	3
+
+struct sched_param {
+	int sched_priority;
+};
+
+#ifdef __KERNEL__
+
 #include
 
 /*
@@ -188,11 +181,11 @@
 extern rwlock_t tasklist_lock;
 extern spinlock_t mmlist_lock;
 
-struct task_struct;
+typedef struct task_struct task_t;
 
 extern void sched_init(void);
 extern void sched_init_smp(void);
-extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle(task_t *idle, int cpu);
 
 extern cpumask_t nohz_cpu_mask;
 
@@ -215,11 +208,11 @@ extern void update_process_times(int user);
 extern void scheduler_tick(void);
 
 #ifdef CONFIG_DETECT_SOFTLOCKUP
-extern void softlockup_tick(void);
+extern void softlockup_tick(struct pt_regs *regs);
 extern void spawn_softlockup_task(void);
 extern void touch_softlockup_watchdog(void);
 #else
-static inline void softlockup_tick(void)
+static inline void softlockup_tick(struct pt_regs *regs)
 {
 }
 static inline void spawn_softlockup_task(void)
@@ -254,10 +247,6 @@ extern int sysctl_max_map_count;
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
 		       unsigned long, unsigned long);
-
-extern unsigned long
-arch_get_unmapped_exec_area(struct file *, unsigned long, unsigned long,
-		       unsigned long, unsigned long);
 extern unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 			  unsigned long len, unsigned long pgoff,
@@ -313,9 +302,6 @@ struct mm_struct {
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
 				unsigned long pgoff, unsigned long flags);
-	unsigned long (*get_unmapped_exec_area) (struct file *filp,
-				unsigned long addr, unsigned long len,
-				unsigned long pgoff, unsigned long flags);
 	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
 	unsigned long mmap_base;	/* base of mmap area */
 	unsigned long task_size;	/* size of task vm space */
@@ -374,15 +360,15 @@ struct sighand_struct {
 	atomic_t		count;
 	struct k_sigaction	action[_NSIG];
 	spinlock_t		siglock;
+	struct rcu_head		rcu;
 };
 
-struct pacct_struct {
-	int			ac_flag;
-	long			ac_exitcode;
-	unsigned long		ac_mem;
-	cputime_t		ac_utime, ac_stime;
-	unsigned long		ac_minflt, ac_majflt;
-};
+extern void sighand_free_cb(struct rcu_head *rhp);
+
+static inline void sighand_free(struct sighand_struct *sp)
+{
+	call_rcu(&sp->rcu, sighand_free_cb);
+}
 
 /*
  * NOTE! "signal_struct" does not have it's own
@@ -398,7 +384,7 @@ struct signal_struct {
 	wait_queue_head_t	wait_chldexit;	/* for wait4() */
 
 	/* current thread group signal load-balancing target: */
-	struct task_struct	*curr_target;
+	task_t			*curr_target;
 
 	/* shared signal handling: */
 	struct sigpending	shared_pending;
@@ -422,7 +408,6 @@ struct signal_struct {
 
 	/* ITIMER_REAL timer for the process */
 	struct hrtimer real_timer;
-	struct task_struct *tsk;
 	ktime_t it_real_incr;
 
 	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
@@ -475,13 +460,6 @@ struct signal_struct {
 	struct key *session_keyring;	/* keyring inherited over fork */
 	struct key *process_keyring;	/* keyring private to this process */
 #endif
-#ifdef CONFIG_BSD_PROCESS_ACCT
-	struct pacct_struct pacct;	/* per-process accounting information */
-#endif
-#ifdef CONFIG_TASKSTATS
-	spinlock_t stats_lock;
-	struct taskstats *stats;
-#endif
 };
 
 /* Context switch must be unlocked if interrupts are to be enabled */
@@ -516,11 +494,7 @@ struct signal_struct {
 
 #define MAX_PRIO	(MAX_RT_PRIO + 40)
 
-#define rt_prio(prio)	unlikely((prio) < MAX_RT_PRIO)
-#define rt_task(p)	rt_prio((p)->prio)
-#define batch_task(p)	(unlikely((p)->policy == SCHED_BATCH))
-#define has_rt_policy(p) \
-	unlikely((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH)
+#define rt_task(p)	(unlikely((p)->prio < MAX_RT_PRIO))
 
 /*
  * Some day this will be a full-fledged user tracking system..
@@ -530,7 +504,7 @@ struct user_struct {
 	atomic_t processes;	/* How many processes does this user have? */
 	atomic_t files;		/* How many open files does this user have? */
 	atomic_t sigpending;	/* How many pending signals does this user have? */
-#ifdef CONFIG_INOTIFY_USER
+#ifdef CONFIG_INOTIFY
 	atomic_t inotify_watches; /* How many inotify watches does this user have? */
 	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 #endif
@@ -554,10 +528,11 @@ extern struct user_struct *find_user(xid_t, uid_t);
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
 
+typedef struct prio_array prio_array_t;
 struct backing_dev_info;
 struct reclaim_state;
 
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHEDSTATS
 struct sched_info {
 	/* cumulative counters */
 	unsigned long	cpu_time,	/* time spent on the cpu */
@@ -568,53 +543,9 @@ struct sched_info {
 	unsigned long	last_arrival,	/* when we last ran on a cpu */
 			last_queued;	/* when we were last queued to run */
 };
-#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
-#ifdef CONFIG_SCHEDSTATS
 extern struct file_operations proc_schedstat_operations;
-#endif /* CONFIG_SCHEDSTATS */
-
-#ifdef CONFIG_TASK_DELAY_ACCT
-struct task_delay_info {
-	spinlock_t	lock;
-	unsigned int	flags;	/* Private per-task flags */
-
-	/* For each stat XXX, add following, aligned appropriately
-	 *
-	 * struct timespec XXX_start, XXX_end;
-	 * u64 XXX_delay;
-	 * u32 XXX_count;
-	 *
-	 * Atomicity of updates to XXX_delay, XXX_count protected by
-	 * single lock above (split into XXX_lock if contention is an issue).
-	 */
-
-	/*
-	 * XXX_count is incremented on every XXX operation, the delay
-	 * associated with the operation is added to XXX_delay.
-	 * XXX_delay contains the accumulated delay time in nanoseconds.
-	 */
-	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
-	u64 blkio_delay;	/* wait for sync block io completion */
-	u64 swapin_delay;	/* wait for swapin block io completion */
-	u32 blkio_count;	/* total count of the number of sync block */
-				/* io operations performed */
-	u32 swapin_count;	/* total count of the number of swapin block */
-				/* io operations performed */
-};
-#endif	/* CONFIG_TASK_DELAY_ACCT */
-
-static inline int sched_info_on(void)
-{
-#ifdef CONFIG_SCHEDSTATS
-	return 1;
-#elif defined(CONFIG_TASK_DELAY_ACCT)
-	extern int delayacct_on;
-	return delayacct_on;
-#else
-	return 0;
 #endif
-}
 
 enum idle_type
 {
@@ -627,9 +558,9 @@ enum idle_type
 /*
  * sched-domains (multiprocessor balancing) declarations:
  */
+#ifdef CONFIG_SMP
 #define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */
 
-#ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
 #define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
 #define SD_BALANCE_EXEC		4	/* Balance on exec */
@@ -638,11 +569,6 @@ enum idle_type
 #define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
 #define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
 #define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */
-#define SD_POWERSAVINGS_BALANCE	256	/* Balance for power savings */
-
-#define BALANCE_FOR_POWER	((sched_mc_power_savings || sched_smt_power_savings) \
-				 ? SD_POWERSAVINGS_BALANCE : 0)
-
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
@@ -712,7 +638,7 @@ struct sched_domain {
 #endif
 };
 
-extern int partition_sched_domains(cpumask_t *partition1,
+extern void partition_sched_domains(cpumask_t *partition1,
 				    cpumask_t *partition2);
 
 /*
@@ -762,23 +688,13 @@ extern int groups_search(struct group_info *group_info, gid_t grp);
 	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
 
 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
-extern void prefetch_stack(struct task_struct *t);
+extern void prefetch_stack(struct task_struct*);
 #else
 static inline void prefetch_stack(struct task_struct *t) { }
 #endif
 
 struct audit_context;		/* See audit.c */
 struct mempolicy;
-struct pipe_inode_info;
-
-enum sleep_type {
-	SLEEP_NORMAL,
-	SLEEP_NONINTERACTIVE,
-	SLEEP_INTERACTIVE,
-	SLEEP_INTERRUPTED,
-};
-
-struct prio_array;
 
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
@@ -789,29 +705,25 @@ struct task_struct {
 
 	int lock_depth;		/* BKL lock depth */
 
-#ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	int oncpu;
 #endif
-#endif
-	int load_weight;	/* for niceness load balancing purposes */
-	int prio, static_prio, normal_prio;
+	int prio, static_prio;
 	struct list_head run_list;
-	struct prio_array *array;
+	prio_array_t *array;
 
 	unsigned short ioprio;
-	unsigned int btrace_seq;
 
 	unsigned long sleep_avg;
 	unsigned long long timestamp, last_ran;
 	unsigned long long sched_time;	/* sched_clock time spent running */
-	enum sleep_type sleep_type;
+	int activated;
 
 	unsigned long policy;
 	cpumask_t cpus_allowed;
 	unsigned int time_slice, first_time_slice;
 
-#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+#ifdef CONFIG_SCHEDSTATS
 	struct sched_info sched_info;
 #endif
 
@@ -851,8 +763,7 @@ struct task_struct {
 	struct task_struct *group_leader;	/* threadgroup leader */
 
 	/* PID/PID hash table linkage. */
-	struct pid_link pids[PIDTYPE_MAX];
-	struct list_head thread_group;
+	struct pid pids[PIDTYPE_MAX];
 
 	struct completion *vfork_done;		/* for vfork() */
 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
@@ -911,11 +822,6 @@ struct task_struct {
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
-
-	/* TUX state */
-	void *tux_info;
-	void (*tux_exit)(void);
-
 	void *security;
 	struct audit_context *audit_context;
 
@@ -934,43 +840,13 @@ struct task_struct {
 	u32 self_exec_id;
 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
 	spinlock_t alloc_lock;
-
-	/* Protection of the PI data structures: */
-	spinlock_t pi_lock;
-
-#ifdef CONFIG_RT_MUTEXES
-	/* PI waiters blocked on a rt_mutex held by this task */
-	struct plist_head pi_waiters;
-	/* Deadlock detection and priority inheritance handling */
-	struct rt_mutex_waiter *pi_blocked_on;
-#endif
+/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
+	spinlock_t proc_lock;
 
 #ifdef CONFIG_DEBUG_MUTEXES
 	/* mutex deadlock detection */
 	struct mutex_waiter *blocked_on;
 #endif
-#ifdef CONFIG_TRACE_IRQFLAGS
-	unsigned int irq_events;
-	int hardirqs_enabled;
-	unsigned long hardirq_enable_ip;
-	unsigned int hardirq_enable_event;
-	unsigned long hardirq_disable_ip;
-	unsigned int hardirq_disable_event;
-	int softirqs_enabled;
-	unsigned long softirq_disable_ip;
-	unsigned int softirq_disable_event;
-	unsigned long softirq_enable_ip;
-	unsigned int softirq_enable_event;
-	int hardirq_context;
-	int softirq_context;
-#endif
-#ifdef CONFIG_LOCKDEP
-# define MAX_LOCK_DEPTH 30UL
-	u64 curr_chain_key;
-	int lockdep_depth;
-	struct held_lock held_locks[MAX_LOCK_DEPTH];
-	unsigned int lockdep_recursion;
-#endif
 
 /* journalling filesystem info */
 	void *journal_info;
@@ -978,6 +854,7 @@ struct task_struct {
 
 /* VM state */
 	struct reclaim_state *reclaim_state;
+	struct dentry *proc_dentry;
 	struct backing_dev_info *backing_dev_info;
 	struct io_context *io_context;
 
@@ -1006,25 +883,9 @@ struct task_struct {
 	struct cpuset *cpuset;
 	nodemask_t mems_allowed;
 	int cpuset_mems_generation;
-	int cpuset_mem_spread_rotor;
-#endif
-	struct robust_list_head __user *robust_list;
-#ifdef CONFIG_COMPAT
-	struct compat_robust_list_head __user *compat_robust_list;
 #endif
-	struct list_head pi_state_list;
-	struct futex_pi_state *pi_state_cache;
-
 	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
-
-	/*
-	 * cache last used pipe for splice
-	 */
-	struct pipe_inode_info *splice_pipe;
-#ifdef CONFIG_TASK_DELAY_ACCT
-	struct task_delay_info *delays;
-#endif
 };
 
 static inline pid_t process_group(struct task_struct *tsk)
@@ -1042,18 +903,18 @@ static inline pid_t process_group(struct task_struct *tsk)
  */
 static inline int pid_alive(struct task_struct *p)
 {
-	return p->pids[PIDTYPE_PID].pid != NULL;
+	return p->pids[PIDTYPE_PID].nr != 0;
 }
 
 extern void free_task(struct task_struct *tsk);
 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
 
-extern void __put_task_struct(struct task_struct *t);
+extern void __put_task_struct_cb(struct rcu_head *rhp);
 
 static inline void put_task_struct(struct task_struct *t)
 {
 	if (atomic_dec_and_test(&t->usage))
-		__put_task_struct(t);
+		call_rcu(&t->rcu, __put_task_struct_cb);
 }
 
 /*
@@ -1078,13 +939,10 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_KSWAPD	0x00040000	/* I am kswapd */
 #define PF_SWAPOFF	0x00080000	/* I am in swapoff */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
-#define PF_BORROWED_MM	0x00200000	/* I am a kthread doing use_mm */
-#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
-#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
-#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
-#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
-#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
-#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
+#define PF_SYNCWRITE	0x00200000	/* I am doing a sync write */
+#define PF_BORROWED_MM	0x00400000	/* I am a kthread doing use_mm */
+#define PF_RANDOMIZE	0x00800000	/* randomize virtual address space */
+#define PF_SWAPWRITE	0x01000000	/* Allowed to write to swap */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
@@ -1112,9 +970,9 @@ static inline void put_task_struct(struct task_struct *t)
 #define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
-extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
+extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
 #else
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
 {
 	if (!cpu_isset(0, new_mask))
 		return -EINVAL;
@@ -1123,8 +981,7 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
-extern unsigned long long
-current_sched_time(const struct task_struct *current_task);
+extern unsigned long long current_sched_time(const task_t *current_task);
 
 /* sched_exec is called by processes performing an exec */
 #ifdef CONFIG_SMP
@@ -1140,29 +997,16 @@ static inline void idle_task_exit(void) {}
 #endif
 
 extern void sched_idle_next(void);
-
-#ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(struct task_struct *p);
-extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern void rt_mutex_adjust_pi(struct task_struct *p);
-#else
-static inline int rt_mutex_getprio(struct task_struct *p)
-{
-	return p->normal_prio;
-}
-# define rt_mutex_adjust_pi(p)		do { } while (0)
-#endif
-
-extern void set_user_nice(struct task_struct *p, long nice);
-extern int task_prio(const struct task_struct *p);
-extern int task_nice(const struct task_struct *p);
-extern int can_nice(const struct task_struct *p, const int nice);
-extern int task_curr(const struct task_struct *p);
+extern void set_user_nice(task_t *p, long nice);
+extern int task_prio(const task_t *p);
+extern int task_nice(const task_t *p);
+extern int can_nice(const task_t *p, const int nice);
+extern int task_curr(const task_t *p);
 extern int idle_cpu(int cpu);
 extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
-extern struct task_struct *idle_task(int cpu);
-extern struct task_struct *curr_task(int cpu);
-extern void set_curr_task(int cpu, struct task_struct *p);
+extern task_t *idle_task(int cpu);
+extern task_t *curr_task(int cpu);
+extern void set_curr_task(int cpu, task_t *p);
 
 void yield(void);
 
@@ -1224,8 +1068,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 #else
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
-extern void FASTCALL(sched_exit(struct task_struct * p));
+extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
+extern void FASTCALL(sched_exit(task_t * p));
 
 extern int in_group_p(gid_t);
 extern int in_egroup_p(gid_t);
@@ -1258,13 +1102,14 @@ extern int force_sig_info(int, struct siginfo *, struct task_struct *);
 extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
 extern int kill_pg_info(int, struct siginfo *, pid_t);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t, u32);
+extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t);
 extern void do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
 extern void zap_other_threads(struct task_struct *p);
 extern int kill_pg(pid_t, int, int);
+extern int kill_sl(pid_t, int, int);
 extern int kill_proc(pid_t, int, int);
 extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
@@ -1321,8 +1166,10 @@ extern void flush_thread(void);
 extern void exit_thread(void);
 
 extern void exit_files(struct task_struct *);
-extern void __cleanup_signal(struct signal_struct *);
-extern void __cleanup_sighand(struct sighand_struct *);
+extern void exit_signal(struct task_struct *);
+extern void __exit_signal(struct task_struct *);
+extern void exit_sighand(struct task_struct *);
+extern void __exit_sighand(struct task_struct *);
 extern void exit_itimers(struct signal_struct *);
 
 extern NORET_TYPE void do_group_exit(int);
@@ -1330,25 +1177,38 @@ extern NORET_TYPE void do_group_exit(int);
 extern void daemonize(const char *, ...);
 extern int allow_signal(int);
 extern int disallow_signal(int);
-extern struct task_struct *child_reaper;
+extern task_t *child_reaper;
 
 extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
-struct task_struct *fork_idle(int);
+task_t *fork_idle(int);
 
 extern void set_task_comm(struct task_struct *tsk, char *from);
 extern void get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
-extern void wait_task_inactive(struct task_struct * p);
+extern void wait_task_inactive(task_t * p);
 #else
 #define wait_task_inactive(p)	do { } while (0)
 #endif
 
 #define remove_parent(p)	list_del_init(&(p)->sibling)
-#define add_parent(p)		list_add_tail(&(p)->sibling,&(p)->parent->children)
+#define add_parent(p, parent)	list_add_tail(&(p)->sibling,&(parent)->children)
+
+#define REMOVE_LINKS(p) do {					\
+	if (thread_group_leader(p))				\
+		list_del_init(&(p)->tasks);			\
+	remove_parent(p);					\
+	} while (0)
 
-#define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
+#define SET_LINKS(p) do {					\
+	if (thread_group_leader(p))				\
+		list_add_tail(&(p)->tasks,&init_task.tasks);	\
+	add_parent(p, (p)->parent);				\
+	} while (0)
+
+#define next_task(p)	list_entry((p)->tasks.next, struct task_struct, tasks)
+#define prev_task(p)	list_entry((p)->tasks.prev, struct task_struct, tasks)
 
 #define for_each_process(p) \
 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
@@ -1363,25 +1223,22 @@ extern void wait_task_inactive(struct task_struct * p);
 #define while_each_thread(g, t) \
 	while ((t = next_thread(t)) != g)
 
-/* de_thread depends on thread_group_leader not being a pid based check */
-#define thread_group_leader(p)	(p == p->group_leader)
+extern task_t * FASTCALL(next_thread(const task_t *p));
 
-static inline struct task_struct *next_thread(const struct task_struct *p)
-{
-	return list_entry(rcu_dereference(p->thread_group.next),
-			  struct task_struct, thread_group);
-}
+#define thread_group_leader(p)	(p->pid == p->tgid)
 
-static inline int thread_group_empty(struct task_struct *p)
+static inline int thread_group_empty(task_t *p)
 {
-	return list_empty(&p->thread_group);
+	return list_empty(&p->pids[PIDTYPE_TGID].pid_list);
 }
 
 #define delay_group_leader(p) \
 		(thread_group_leader(p) && !thread_group_empty(p))
 
+extern void unhash_process(struct task_struct *p);
+
 /*
- * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
+ * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4(). Also used in procfs. Also
  * pins the final release of task.io_context. Also protects ->cpuset.
  *
@@ -1399,15 +1256,6 @@ static inline void task_unlock(struct task_struct *p)
 	spin_unlock(&p->alloc_lock);
 }
 
-extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
-						unsigned long *flags);
-
-static inline void unlock_task_sighand(struct task_struct *tsk,
-						unsigned long *flags)
-{
-	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
-}
-
 #ifndef __HAVE_THREAD_FUNCTIONS
 
 #define task_thread_info(task) (task)->thread_info
@@ -1557,11 +1405,6 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
-#include
-extern int sched_mc_power_savings, sched_smt_power_savings;
-extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings;
-extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
-
 extern void normalize_rt_tasks(void);
 
 #ifdef CONFIG_PM
@@ -1590,14 +1433,6 @@ static inline void freeze(struct task_struct *p)
 	p->flags |= PF_FREEZE;
 }
 
-/*
- * Sometimes we may need to cancel the previous 'freeze' request
- */
-static inline void do_not_freeze(struct task_struct *p)
-{
-	p->flags &= ~PF_FREEZE;
-}
-
 /*
  * Wake up a frozen process
  */