X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Flinux%2Fsched.h;h=901154fe8c1deea2df8a97ceedc6d4e18e570d2c;hb=987b0145d94eecf292d8b301228356f44611ab7c;hp=fda98fe8135622709abd406d9f0dbf17ed61c1c7;hpb=f7ed79d23a47594e7834d66a8f14449796d4f3e6;p=linux-2.6.git

diff --git a/include/linux/sched.h b/include/linux/sched.h
index fda98fe81..901154fe8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -36,13 +36,10 @@
 #include
 #include
 #include
-#include
 
 #include	/* For AT_VECTOR_SIZE */
 
 struct exec_domain;
-extern int exec_shield;
-extern int print_fatal_signals;
 
 /*
  * cloning flags:
@@ -103,7 +100,6 @@ DECLARE_PER_CPU(unsigned long, process_counts);
 extern int nr_processes(void);
 extern unsigned long nr_running(void);
 extern unsigned long nr_uninterruptible(void);
-extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
 
 #include
@@ -212,11 +208,11 @@ extern void update_process_times(int user);
 extern void scheduler_tick(void);
 
 #ifdef CONFIG_DETECT_SOFTLOCKUP
-extern void softlockup_tick(void);
+extern void softlockup_tick(struct pt_regs *regs);
 extern void spawn_softlockup_task(void);
 extern void touch_softlockup_watchdog(void);
 #else
-static inline void softlockup_tick(void)
+static inline void softlockup_tick(struct pt_regs *regs)
 {
 }
 static inline void spawn_softlockup_task(void)
@@ -251,10 +247,6 @@ extern int sysctl_max_map_count;
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
 		       unsigned long, unsigned long);
-
-extern unsigned long
-arch_get_unmapped_exec_area(struct file *, unsigned long, unsigned long,
-		       unsigned long, unsigned long);
 extern unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 			  unsigned long len, unsigned long pgoff,
@@ -310,9 +302,6 @@ struct mm_struct {
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
 				unsigned long pgoff, unsigned long flags);
-	unsigned long (*get_unmapped_exec_area) (struct file *filp,
-				unsigned long addr, unsigned long len,
-				unsigned long pgoff, unsigned long flags);
 	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
 	unsigned long mmap_base;		/* base of mmap area */
 	unsigned long task_size;		/* size of task vm space */
@@ -371,8 +360,16 @@ struct sighand_struct {
 	atomic_t		count;
 	struct k_sigaction	action[_NSIG];
 	spinlock_t		siglock;
+	struct rcu_head		rcu;
 };
 
+extern void sighand_free_cb(struct rcu_head *rhp);
+
+static inline void sighand_free(struct sighand_struct *sp)
+{
+	call_rcu(&sp->rcu, sighand_free_cb);
+}
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
@@ -411,7 +408,6 @@ struct signal_struct {
 
 	/* ITIMER_REAL timer for the process */
 	struct hrtimer real_timer;
-	struct task_struct *tsk;
 	ktime_t it_real_incr;
 
 	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
@@ -499,7 +495,6 @@ struct signal_struct {
 #define MAX_PRIO		(MAX_RT_PRIO + 40)
 
 #define rt_task(p)		(unlikely((p)->prio < MAX_RT_PRIO))
-#define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))
 
 /*
  * Some day this will be a full-fledged user tracking system..
@@ -700,14 +695,6 @@ static inline void prefetch_stack(struct task_struct *t) { }
 
 struct audit_context;		/* See audit.c */
 struct mempolicy;
-struct pipe_inode_info;
-
-enum sleep_type {
-	SLEEP_NORMAL,
-	SLEEP_NONINTERACTIVE,
-	SLEEP_INTERACTIVE,
-	SLEEP_INTERRUPTED,
-};
 
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
@@ -726,12 +713,11 @@ struct task_struct {
 	prio_array_t *array;
 
 	unsigned short ioprio;
-	unsigned int btrace_seq;
 
 	unsigned long sleep_avg;
 	unsigned long long timestamp, last_ran;
 	unsigned long long sched_time; /* sched_clock time spent running */
-	enum sleep_type sleep_type;
+	int activated;
 
 	unsigned long policy;
 	cpumask_t cpus_allowed;
@@ -777,8 +763,7 @@ struct task_struct {
 	struct task_struct *group_leader;	/* threadgroup leader */
 
 	/* PID/PID hash table linkage. */
-	struct pid_link pids[PIDTYPE_MAX];
-	struct list_head thread_group;
+	struct pid pids[PIDTYPE_MAX];
 
 	struct completion *vfork_done;		/* for vfork() */
 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
@@ -837,11 +822,6 @@ struct task_struct {
 	int (*notifier)(void *priv);
 	void *notifier_data;
 	sigset_t *notifier_mask;
-
-	/* TUX state */
-	void *tux_info;
-	void (*tux_exit)(void);
-
 	void *security;
 	struct audit_context *audit_context;
 
@@ -903,20 +883,9 @@ struct task_struct {
 	struct cpuset *cpuset;
 	nodemask_t mems_allowed;
 	int cpuset_mems_generation;
-	int cpuset_mem_spread_rotor;
-#endif
-	struct robust_list_head __user *robust_list;
-#ifdef CONFIG_COMPAT
-	struct compat_robust_list_head __user *compat_robust_list;
 #endif
 
-	atomic_t fs_excl;	/* holding fs exclusive resources */
 	struct rcu_head rcu;
-
-	/*
-	 * cache last used pipe for splice
-	 */
-	struct pipe_inode_info *splice_pipe;
 };
 
 static inline pid_t process_group(struct task_struct *tsk)
@@ -934,18 +903,18 @@ static inline pid_t process_group(struct task_struct *tsk)
  */
 static inline int pid_alive(struct task_struct *p)
 {
-	return p->pids[PIDTYPE_PID].pid != NULL;
+	return p->pids[PIDTYPE_PID].nr != 0;
 }
 
 extern void free_task(struct task_struct *tsk);
 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
 
-extern void __put_task_struct(struct task_struct *t);
+extern void __put_task_struct_cb(struct rcu_head *rhp);
 
 static inline void put_task_struct(struct task_struct *t)
 {
 	if (atomic_dec_and_test(&t->usage))
-		__put_task_struct(t);
+		call_rcu(&t->rcu, __put_task_struct_cb);
 }
 
 /*
@@ -974,9 +943,6 @@ static inline void put_task_struct(struct task_struct *t)
 #define PF_BORROWED_MM	0x00400000	/* I am a kthread doing use_mm */
 #define PF_RANDOMIZE	0x00800000	/* randomize virtual address space */
 #define PF_SWAPWRITE	0x01000000	/* Allowed to write to swap */
-#define PF_SPREAD_PAGE	0x04000000	/* Spread page cache over cpuset */
-#define PF_SPREAD_SLAB	0x08000000	/* Spread some slab caches over cpuset */
-#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
@@ -1071,10 +1037,9 @@ extern struct mm_struct init_mm;
 
 #define find_task_by_real_pid(nr) \
-	find_task_by_pid_type(PIDTYPE_PID, nr)
+	find_task_by_pid_type(PIDTYPE_REALPID, nr)
 
 #define find_task_by_pid(nr) \
-	find_task_by_pid_type(PIDTYPE_PID, \
-		vx_rmap_pid(nr))
+	find_task_by_pid_type(PIDTYPE_PID, nr)
 
 extern struct task_struct *find_task_by_pid_type(int type, int pid);
 extern void set_special_pids(pid_t session, pid_t pgrp);
@@ -1144,6 +1109,7 @@ extern void force_sig_specific(int, struct task_struct *);
 extern int send_sig(int, struct task_struct *, int);
 extern void zap_other_threads(struct task_struct *p);
 extern int kill_pg(pid_t, int, int);
+extern int kill_sl(pid_t, int, int);
 extern int kill_proc(pid_t, int, int);
 extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
@@ -1200,8 +1166,10 @@ extern void flush_thread(void);
 extern void exit_thread(void);
 
 extern void exit_files(struct task_struct *);
-extern void __cleanup_signal(struct signal_struct *);
-extern void __cleanup_sighand(struct sighand_struct *);
+extern void exit_signal(struct task_struct *);
+extern void __exit_signal(struct task_struct *);
+extern void exit_sighand(struct task_struct *);
+extern void __exit_sighand(struct task_struct *);
 extern void exit_itimers(struct signal_struct *);
 
 extern NORET_TYPE void do_group_exit(int);
@@ -1225,9 +1193,22 @@ extern void wait_task_inactive(task_t * p);
 #endif
 
 #define remove_parent(p)	list_del_init(&(p)->sibling)
-#define add_parent(p)		list_add_tail(&(p)->sibling,&(p)->parent->children)
+#define add_parent(p, parent)	list_add_tail(&(p)->sibling,&(parent)->children)
+
+#define REMOVE_LINKS(p) do {					\
+	if (thread_group_leader(p))				\
+		list_del_init(&(p)->tasks);			\
+	remove_parent(p);					\
+	} while (0)
+
+#define SET_LINKS(p) do {					\
+	if (thread_group_leader(p))				\
+		list_add_tail(&(p)->tasks,&init_task.tasks);	\
+	add_parent(p, (p)->parent);				\
+	} while (0)
 
-#define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
+#define next_task(p)	list_entry((p)->tasks.next, struct task_struct, tasks)
+#define prev_task(p)	list_entry((p)->tasks.prev, struct task_struct, tasks)
 
 #define for_each_process(p) \
 	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
@@ -1242,23 +1223,20 @@ extern void wait_task_inactive(task_t * p);
 #define while_each_thread(g, t) \
 	while ((t = next_thread(t)) != g)
 
-/* de_thread depends on thread_group_leader not being a pid based check */
-#define thread_group_leader(p)	(p == p->group_leader)
+extern task_t * FASTCALL(next_thread(const task_t *p));
 
-static inline task_t *next_thread(const task_t *p)
-{
-	return list_entry(rcu_dereference(p->thread_group.next),
-			  task_t, thread_group);
-}
+#define thread_group_leader(p)	(p->pid == p->tgid)
 
 static inline int thread_group_empty(task_t *p)
 {
-	return list_empty(&p->thread_group);
+	return list_empty(&p->pids[PIDTYPE_TGID].pid_list);
 }
 
 #define delay_group_leader(p) \
 	(thread_group_leader(p) && !thread_group_empty(p))
 
+extern void unhash_process(struct task_struct *p);
+
 /*
  * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4(). Also used in procfs. Also
@@ -1278,15 +1256,6 @@ static inline void task_unlock(struct task_struct *p)
 	spin_unlock(&p->alloc_lock);
 }
 
-extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
-							unsigned long *flags);
-
-static inline void unlock_task_sighand(struct task_struct *tsk,
-						unsigned long *flags)
-{
-	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
-}
-
 #ifndef __HAVE_THREAD_FUNCTIONS
 
 #define task_thread_info(task) (task)->thread_info