extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
-extern void update_one_process(struct task_struct *p, unsigned long user,
- unsigned long system, int cpu);
extern void scheduler_tick(int user_tick, int system);
extern unsigned long cache_decay_ticks;
unsigned long saved_auxv[40]; /* for /proc/PID/auxv */
unsigned dumpable:1;
-#ifdef CONFIG_HUGETLB_PAGE
- int used_hugetlb;
-#endif
cpumask_t cpu_vm_mask;
/* Architecture-specific MM context */
atomic_t __count; /* reference count */
atomic_t processes; /* How many processes does this user have? */
atomic_t files; /* How many open files does this user have? */
+ atomic_t sigpending; /* How many pending signals does this user have? */
+ /* protected by mq_lock */
+ unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
/* Hash table maintenance information */
struct list_head uidhash_list;
struct task_struct *it_process; /* process to send signal to */
struct timer_list it_timer;
struct sigqueue *sigq; /* signal queue entry. */
+ struct list_head abs_timer_entry; /* clock abs_timer_list */
+ struct timespec wall_to_prev; /* wall_to_monotonic used when set */
};
gid_t *blocks[0];
};
+/*
+ * get_group_info() must be called with the owning task locked (via task_lock())
+ * when task != current. The reason is that the vast majority of callers look
+ * at current->group_info, which cannot be changed except by the current task.
+ * Changing current->group_info requires the task lock, too.
+ */
#define get_group_info(group_info) do { \
atomic_inc(&(group_info)->usage); \
} while (0)
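/*
 * A minimal sketch of the rule described above: a reader inspecting another
 * task's group_info takes task_lock() first, pins the group_info with
 * get_group_info(), and later drops that reference with put_group_info().
 * inspect_groups() is a hypothetical helper added for illustration; the other
 * interfaces are the ones referenced in the comment above.
 */
static void inspect_groups(struct task_struct *p)
{
	struct group_info *gi;

	task_lock(p);		/* needed because p may not be current */
	gi = p->group_info;
	get_group_info(gi);	/* pin it so it survives task_unlock() */
	task_unlock(p);

	/* ... safe to look at gi->ngroups / GROUP_AT(gi, i) here ... */

	put_group_info(gi);	/* drop the reference taken above */
}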
unsigned int time_slice, first_time_slice;
struct list_head tasks;
+ /*
+ * ptrace_list/ptrace_children forms the list of my children
+ * that were stolen by a ptracer.
+ */
struct list_head ptrace_children;
struct list_head ptrace_list;
struct task_struct *real_parent; /* real parent process (when being debugged) */
struct task_struct *parent; /* parent process */
+ /*
+ * children/sibling forms the list of my children plus the
+ * tasks I'm ptracing.
+ */
struct list_head children; /* list of my children */
struct list_head sibling; /* linkage in my parent's children list */
struct task_struct *group_leader; /* threadgroup leader */
extern void sched_idle_next(void);
extern void set_user_nice(task_t *p, long nice);
-extern int task_prio(task_t *p);
-extern int task_nice(task_t *p);
-extern int task_curr(task_t *p);
+extern int task_prio(const task_t *p);
+extern int task_nice(const task_t *p);
+extern int task_curr(const task_t *p);
extern int idle_cpu(int cpu);
void yield(void);
/* per-UID process charging. */
extern struct user_struct * alloc_uid(xid_t, uid_t);
+static inline struct user_struct *get_uid(struct user_struct *u)
+{
+ atomic_inc(&u->__count);
+ return u;
+}
extern void free_uid(struct user_struct *);
extern void switch_uid(struct user_struct *);
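/*
 * Sketch of the per-UID reference counting above: get_uid() takes a reference
 * on a user_struct, free_uid() drops one. share_user()/release_user() are
 * hypothetical helpers added for illustration, modelled on how a new task
 * would inherit its creator's user_struct.
 */
static void share_user(struct task_struct *child, struct task_struct *parent)
{
	/* the child now holds its own reference on the parent's user_struct */
	child->user = get_uid(parent->user);
}

static void release_user(struct task_struct *p)
{
	free_uid(p->user);	/* drop that reference when the task goes away */
	p->user = NULL;
}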
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
-extern task_t * FASTCALL(next_thread(task_t *p));
+extern task_t * FASTCALL(next_thread(const task_t *p));
#define thread_group_leader(p) (p->pid == p->tgid)
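/*
 * Sketch of iterating a thread group with the macro above: starting from the
 * group leader, next_thread() walks the thread list until it comes back to
 * the leader. count_threads() is a hypothetical helper added for
 * illustration; tasklist_lock keeps the list stable while walking it.
 */
static int count_threads(struct task_struct *p)
{
	task_t *g = p->group_leader, *t = g;
	int n = 0;

	read_lock(&tasklist_lock);
	do {
		n++;
	} while_each_thread(g, t);
	read_unlock(&tasklist_lock);

	return n;
}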
extern void unhash_process(struct task_struct *p);
/*
- * Protects ->fs, ->files, ->mm, ->ptrace and synchronises with wait4().
+ * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info and synchronises with
+ * wait4().
+ *
* Nests both inside and outside of read_lock(&tasklist_lock).
* It must not be nested with write_lock_irq(&tasklist_lock),
* neither inside nor outside.
*/
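/*
 * Sketch of using the lock described above (taken via task_lock(), i.e.
 * p->alloc_lock): ->mm is only stable while the lock is held, so a reference
 * is taken before dropping it, and the caller releases it with mmput() when
 * done. grab_mm() is a hypothetical helper added for illustration.
 */
static struct mm_struct *grab_mm(struct task_struct *p)
{
	struct mm_struct *mm;

	task_lock(p);				/* ->mm cannot change while this is held */
	mm = p->mm;
	if (mm)
		atomic_inc(&mm->mm_users);	/* keep the mm alive after unlock */
	task_unlock(p);

	return mm;
}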
#ifdef CONFIG_SMP
-static inline unsigned int task_cpu(struct task_struct *p)
+static inline unsigned int task_cpu(const struct task_struct *p)
{
return p->thread_info->cpu;
}
#else
-static inline unsigned int task_cpu(struct task_struct *p)
+static inline unsigned int task_cpu(const struct task_struct *p)
{
return 0;
}
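/*
 * Small usage sketch: task_cpu() reports the CPU a task last ran on, and the
 * same call compiles on both SMP and UP kernels (always 0 on UP).
 * report_task_cpu() is a hypothetical helper added for illustration.
 */
static void report_task_cpu(const struct task_struct *p)
{
	printk(KERN_DEBUG "%s[%d] last ran on CPU %u\n",
	       p->comm, p->pid, task_cpu(p));
}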