X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Flinux%2Fsched.h;h=7fc52d7b5f5fa094dc45cea4165aca98e94f52d9;hb=9bf4aaab3e101692164d49b7ca357651eb691cb6;hp=3ed211a17ff4da014a335a7c29dcd7a1ab0c7ab3;hpb=db216c3d5e4c040e557a50f8f5d35d5c415e8c1c;p=linux-2.6.git

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3ed211a17..7fc52d7b5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -170,8 +170,6 @@ long io_schedule_timeout(long timeout);
 extern void cpu_init (void);
 extern void trap_init(void);
 extern void update_process_times(int user);
-extern void update_one_process(struct task_struct *p, unsigned long user,
-			       unsigned long system, int cpu);
 extern void scheduler_tick(int user_tick, int system);
 
 extern unsigned long cache_decay_ticks;
@@ -219,9 +217,6 @@ struct mm_struct {
 	unsigned long saved_auxv[40]; /* for /proc/PID/auxv */
 
 	unsigned dumpable:1;
-#ifdef CONFIG_HUGETLB_PAGE
-	int used_hugetlb;
-#endif
 	cpumask_t cpu_vm_mask;
 
 	/* Architecture-specific MM context */
@@ -317,6 +312,9 @@ struct user_struct {
 	atomic_t __count;	/* reference count */
 	atomic_t processes;	/* How many processes does this user have? */
 	atomic_t files;		/* How many open files does this user have? */
+	atomic_t sigpending;	/* How many pending signals does this user have? */
+	/* protected by mq_lock	*/
+	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
 
 	/* Hash table maintenance information */
 	struct list_head uidhash_list;
@@ -349,6 +347,8 @@ struct k_itimer {
 	struct task_struct *it_process;	/* process to send signal to */
 	struct timer_list it_timer;
 	struct sigqueue *sigq;		/* signal queue entry. */
+	struct list_head abs_timer_entry; /* clock abs_timer_list */
+	struct timespec wall_to_prev;	/* wall_to_monotonic used when set */
 };
 
 
@@ -365,6 +365,12 @@ struct group_info {
 	gid_t *blocks[0];
 };
 
+/*
+ * get_group_info() must be called with the owning task locked (via task_lock())
+ * when task != current.  The reason being that the vast majority of callers are
+ * looking at current->group_info, which can not be changed except by the
+ * current task.  Changing current->group_info requires the task lock, too.
+ */
 #define get_group_info(group_info) do { \
 	atomic_inc(&(group_info)->usage); \
 } while (0)
@@ -408,6 +414,10 @@ struct task_struct {
 	unsigned int time_slice, first_time_slice;
 
 	struct list_head tasks;
+	/*
+	 * ptrace_list/ptrace_children forms the list of my children
+	 * that were stolen by a ptracer.
+	 */
 	struct list_head ptrace_children;
 	struct list_head ptrace_list;
 
@@ -429,6 +439,10 @@ struct task_struct {
 	 */
 	struct task_struct *real_parent; /* real parent process (when being debugged) */
 	struct task_struct *parent;	/* parent process */
+	/*
+	 * children/sibling forms the list of my children plus the
+	 * tasks I'm ptracing.
+	 */
 	struct list_head children;	/* list of my children */
 	struct list_head sibling;	/* linkage in my parent's children list */
 	struct task_struct *group_leader;	/* threadgroup leader */
@@ -693,9 +707,9 @@ extern void sched_balance_exec(void);
 extern void sched_idle_next(void);
 
 extern void set_user_nice(task_t *p, long nice);
-extern int task_prio(task_t *p);
-extern int task_nice(task_t *p);
-extern int task_curr(task_t *p);
+extern int task_prio(const task_t *p);
+extern int task_nice(const task_t *p);
+extern int task_curr(const task_t *p);
 extern int idle_cpu(int cpu);
 
 void yield(void);
@@ -731,6 +745,11 @@ extern void __set_special_pids(pid_t session, pid_t pgrp);
 
 /* per-UID process charging. */
 extern struct user_struct * alloc_uid(xid_t, uid_t);
+static inline struct user_struct *get_uid(struct user_struct *u)
+{
+	atomic_inc(&u->__count);
+	return u;
+}
 extern void free_uid(struct user_struct *);
 extern void switch_uid(struct user_struct *);
 
@@ -917,7 +936,7 @@ extern void wait_task_inactive(task_t * p);
 #define while_each_thread(g, t) \
 	while ((t = next_thread(t)) != g)
 
-extern task_t * FASTCALL(next_thread(task_t *p));
+extern task_t * FASTCALL(next_thread(const task_t *p));
 
 #define thread_group_leader(p) (p->pid == p->tgid)
 
@@ -934,7 +953,9 @@ static inline int thread_group_empty(task_t *p)
 extern void unhash_process(struct task_struct *p);
 
 /*
- * Protects ->fs, ->files, ->mm, ->ptrace and synchronises with wait4().
+ * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info and synchronises with
+ * wait4().
+ *
  * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
@@ -1056,7 +1077,7 @@ extern void signal_wake_up(struct task_struct *t, int resume_stopped);
  */
 #ifdef CONFIG_SMP
-static inline unsigned int task_cpu(struct task_struct *p)
+static inline unsigned int task_cpu(const struct task_struct *p)
 {
 	return p->thread_info->cpu;
 }
 
@@ -1068,7 +1089,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 
 #else
 
-static inline unsigned int task_cpu(struct task_struct *p)
+static inline unsigned int task_cpu(const struct task_struct *p)
 {
 	return 0;
 }
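
The new user_struct fields (sigpending, mq_bytes) exist so that pending signals
and POSIX message-queue bytes can be charged to the owning user and checked
against per-user limits. Below is a minimal standalone sketch of that
charge/uncharge pattern in userspace C11, not kernel code: SIGPENDING_LIMIT,
sigqueue_charge() and sigqueue_uncharge() are invented stand-ins for the
kernel's rlimit check and atomic_inc()/atomic_dec() on the real atomic_t.

#include <stdatomic.h>
#include <stdio.h>

#define SIGPENDING_LIMIT 1024		/* stand-in for the real per-process rlimit */

struct user_struct {
	atomic_int sigpending;		/* pending signals charged to this user */
};

/* Charge one pending signal; fail if the per-user limit would be exceeded. */
static int sigqueue_charge(struct user_struct *u)
{
	if (atomic_fetch_add(&u->sigpending, 1) + 1 > SIGPENDING_LIMIT) {
		atomic_fetch_sub(&u->sigpending, 1);	/* roll back the charge */
		return -1;
	}
	return 0;
}

/* Release one previously charged pending signal. */
static void sigqueue_uncharge(struct user_struct *u)
{
	atomic_fetch_sub(&u->sigpending, 1);
}

int main(void)
{
	struct user_struct u = { .sigpending = 0 };

	if (sigqueue_charge(&u) == 0) {
		printf("pending signals: %d\n", atomic_load(&u.sigpending));
		sigqueue_uncharge(&u);
	}
	return 0;
}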
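
The comment added above get_group_info() states a locking rule: when
inspecting a task other than current, take that task's lock before bumping
the group_info reference count, because group_info can only be swapped while
the task lock is held. Here is a standalone sketch of that pattern, with a
pthread_mutex_t standing in for task_lock() and toy struct names throughout.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct group_info {
	atomic_int usage;			/* reference count */
};

struct task {
	pthread_mutex_t alloc_lock;		/* stand-in for task_lock() */
	struct group_info *group_info;
};

/* Safely take a reference on some other task's group_info. */
static struct group_info *get_task_group_info(struct task *t)
{
	struct group_info *gi;

	pthread_mutex_lock(&t->alloc_lock);	/* task_lock(t) */
	gi = t->group_info;
	atomic_fetch_add(&gi->usage, 1);	/* get_group_info(gi) */
	pthread_mutex_unlock(&t->alloc_lock);	/* task_unlock(t) */
	return gi;
}

int main(void)
{
	struct group_info gi = { .usage = 1 };
	struct task t = { PTHREAD_MUTEX_INITIALIZER, &gi };

	struct group_info *ref = get_task_group_info(&t);
	printf("usage after get: %d\n", atomic_load(&ref->usage));
	return 0;
}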
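
get_uid() is the new inline refcount helper paired with alloc_uid() and
free_uid(). Returning the pointer it was given lets a caller take the
reference and assign it in one expression. A toy standalone rendering of the
same shape, with C11 atomics in place of the kernel's atomic_t and an
invented struct task for the usage example:

#include <stdatomic.h>
#include <stdio.h>

struct user_struct {
	atomic_int __count;			/* reference count */
};

/* Bump the refcount and hand the pointer back, mirroring the kernel helper. */
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_fetch_add(&u->__count, 1);
	return u;
}

struct task {
	struct user_struct *user;
};

int main(void)
{
	struct user_struct root = { .__count = 1 };
	struct task child;

	/* take the reference and store it in one step */
	child.user = get_uid(&root);
	printf("count: %d\n", atomic_load(&root.__count));
	return 0;
}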