X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Flinux%2Fsched.h;h=0d9a084153080c3f761d8557774016eafdf588a3;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=73d7127e3f282e22f9685dacf0eaab5ff99180db;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 73d7127e3..0d9a08415 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -13,12 +13,14 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
@@ -29,6 +31,8 @@
 #include
 #include
 #include
+#include
+#include
 
 struct exec_domain;
@@ -40,7 +44,6 @@ struct exec_domain;
 #define CLONE_FS	0x00000200	/* set if fs info shared between processes */
 #define CLONE_FILES	0x00000400	/* set if open files shared between processes */
 #define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
-#define CLONE_IDLETASK	0x00001000	/* set if new pid should be 0 (kernel only)*/
 #define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
 #define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
 #define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
@@ -85,9 +88,7 @@ extern unsigned long avenrun[];	/* Load averages */
 	load += n*(FIXED_1-exp); \
 	load >>= FSHIFT;
 
-#define CT_TO_SECS(x)	((x) / HZ)
-#define CT_TO_USECS(x)	(((x) % HZ) * 1000000/HZ)
-
+extern unsigned long total_forks;
 extern int nr_threads;
 extern int last_pid;
 DECLARE_PER_CPU(unsigned long, process_counts);
@@ -107,8 +108,10 @@ extern unsigned long nr_iowait(void);
 #define TASK_INTERRUPTIBLE	1
 #define TASK_UNINTERRUPTIBLE	2
 #define TASK_STOPPED		4
-#define TASK_ZOMBIE		8
-#define TASK_DEAD		16
+#define TASK_TRACED		8
+#define EXIT_ZOMBIE		16
+#define EXIT_DEAD		32
+#define TASK_ONHOLD		64
 
 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
@@ -120,6 +123,9 @@ extern unsigned long nr_iowait(void);
 #define set_current_state(state_value)		\
 	set_mb(current->state, (state_value))
 
+/* Task command name length */
+#define TASK_COMM_LEN 16
+
 /*
  * Scheduling policies
  */
@@ -147,9 +153,10 @@ extern spinlock_t mmlist_lock;
 typedef struct task_struct task_t;
 
 extern void sched_init(void);
+extern void sched_init_smp(void);
 extern void init_idle(task_t *idle, int cpu);
 
-extern cpumask_t idle_cpu_mask;
+extern cpumask_t nohz_cpu_mask;
 
 extern void show_state(void);
 extern void show_regs(struct pt_regs *);
@@ -167,13 +174,13 @@ long io_schedule_timeout(long timeout);
 extern void cpu_init (void);
 extern void trap_init(void);
 extern void update_process_times(int user);
-extern void update_one_process(struct task_struct *p, unsigned long user,
-		unsigned long system, int cpu);
-extern void scheduler_tick(int user_tick, int system);
+extern void scheduler_tick(void);
 extern unsigned long cache_decay_ticks;
 
-extern const unsigned long scheduling_functions_start_here;
-extern const unsigned long scheduling_functions_end_here;
+/* Attach to any functions which should be ignored in wchan output. */
+#define __sched		__attribute__((__section__(".sched.text")))
+/* Is this address in the __sched functions? */
+extern int in_sched_functions(unsigned long addr);
 
 #define MAX_SCHEDULE_TIMEOUT	LONG_MAX
 extern signed long FASTCALL(schedule_timeout(signed long timeout));
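As a usage note on the timeout API kept above: a sleeper sets its state first, then calls schedule_timeout(); passing MAX_SCHEDULE_TIMEOUT sleeps without bound. A minimal sketch of the idiom (hypothetical caller, not part of this patch):

	signed long timeout;

	set_current_state(TASK_INTERRUPTIBLE);
	timeout = schedule_timeout(HZ);	/* sleep for up to one second */
	/* timeout == 0: the full interval elapsed; > 0: woken up early */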
@@ -188,19 +195,35 @@ extern int sysctl_max_map_count;
 
 #include
 
+extern unsigned long
+arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+		unsigned long, unsigned long);
+extern unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff,
+		unsigned long flags);
+extern void arch_unmap_area(struct vm_area_struct *area);
+extern void arch_unmap_area_topdown(struct vm_area_struct *area);
+
+
 struct mm_struct {
 	struct vm_area_struct * mmap;		/* list of VMAs */
 	struct rb_root mm_rb;
 	struct vm_area_struct * mmap_cache;	/* last find_vma result */
+	unsigned long (*get_unmapped_area) (struct file *filp,
+		unsigned long addr, unsigned long len,
+		unsigned long pgoff, unsigned long flags);
+	void (*unmap_area) (struct vm_area_struct *area);
+	unsigned long mmap_base;		/* base of mmap area */
 	unsigned long free_area_cache;		/* first hole */
 	pgd_t * pgd;
 	atomic_t mm_users;			/* How many users with user space? */
 	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
 	int map_count;				/* number of VMAs */
 	struct rw_semaphore mmap_sem;
-	spinlock_t page_table_lock;		/* Protects task page tables and mm->rss */
+	spinlock_t page_table_lock;		/* Protects page tables, mm->rss, mm->anon_rss */
 
-	struct list_head mmlist;		/* List of all active mm's.  These are globally strung
+	struct list_head mmlist;		/* List of maybe swapped mm's.  These are globally strung
 						 * together off init_mm.mmlist, and are protected
 						 * by mmlist_lock
 						 */
@@ -208,19 +231,21 @@ struct mm_struct {
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long start_brk, brk, start_stack;
 	unsigned long arg_start, arg_end, env_start, env_end;
-	unsigned long rss, total_vm, locked_vm;
-	unsigned long def_flags;
+	unsigned long rss, anon_rss, total_vm, locked_vm, shared_vm;
+	unsigned long exec_vm, stack_vm, reserved_vm, def_flags, nr_ptes;
 
-	unsigned long saved_auxv[40]; /* for /proc/PID/auxv */
+	unsigned long saved_auxv[42]; /* for /proc/PID/auxv */
 
 	unsigned dumpable:1;
-#ifdef CONFIG_HUGETLB_PAGE
-	int used_hugetlb;
-#endif
 	cpumask_t cpu_vm_mask;
 
 	/* Architecture-specific MM context */
 	mm_context_t context;
+	struct vx_info *mm_vx_info;
+
+	/* Token based thrashing protection. */
+	unsigned long swap_token_time;
+	char recent_pagein;
 
 	/* coredumping support */
 	int core_waiters;
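Per the updated page_table_lock comment above, rss and anon_rss are only modified under that spinlock; a sketch of the locking pattern (assumed caller, not from this patch):

	spin_lock(&mm->page_table_lock);
	mm->rss++;		/* e.g. after faulting in a page */
	mm->anon_rss++;		/* if the page is anonymous */
	spin_unlock(&mm->page_table_lock);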
@@ -231,9 +256,10 @@ struct mm_struct {
 	struct kioctx		*ioctx_list;
 	struct kioctx		default_kioctx;
-};
 
-extern int mmlist_nr;
+	unsigned long hiwater_rss;	/* High-water RSS usage */
+	unsigned long hiwater_vm;	/* High-water virtual memory usage */
+};
 
 struct sighand_struct {
 	atomic_t		count;
@@ -250,6 +276,9 @@
  */
 struct signal_struct {
 	atomic_t		count;
+	atomic_t		live;
+
+	wait_queue_head_t	wait_chldexit;	/* for wait4() */
 
 	/* current thread group signal load-balancing target: */
 	task_t			*curr_target;
@@ -258,7 +287,6 @@
 	struct sigpending	shared_pending;
 
 	/* thread group exit support */
-	int			group_exit;
 	int			group_exit_code;
 	/* overloaded:
 	 * - notify group_exit_task when ->count is equal to notify_count
@@ -270,6 +298,7 @@
 	/* thread group stop support, overloads group_exit_code too */
 	int			group_stop_count;
+	unsigned int		flags;	/* see SIGNAL_* flags below */
 
 	/* POSIX.1b Interval Timers */
 	struct list_head posix_timers;
@@ -282,15 +311,45 @@
 	int leader;
 	struct tty_struct *tty;	/* NULL if no tty */
+
+	/*
+	 * Cumulative resource counters for dead threads in the group,
+	 * and for reaped dead child processes forked by this group.
+	 * Live threads maintain their own counters and add to these
+	 * in __exit_signal, except for the group leader.
+	 */
+	cputime_t utime, stime, cutime, cstime;
+	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
+	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
+
+	/*
+	 * We don't bother to synchronize most readers of this at all,
+	 * because there is no reader checking a limit that actually needs
+	 * to get both rlim_cur and rlim_max atomically, and either one
+	 * alone is a single word that can safely be read normally.
+	 * getrlimit/setrlimit use task_lock(current->group_leader) to
+	 * protect this instead of the siglock, because they really
+	 * have no need to disable irqs.
+	 */
+	struct rlimit rlim[RLIM_NLIMITS];
 };
 
+/*
+ * Bits in flags field of signal_struct.
+ */
+#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
+#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
+#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
+#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
+
+
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL tasks are
  * in the range MAX_RT_PRIO..MAX_PRIO-1. Priority values
  * are inverted: lower p->prio value means higher priority.
  *
- * The MAX_RT_USER_PRIO value allows the actual maximum
+ * The MAX_USER_RT_PRIO value allows the actual maximum
  * RT priority to be separate from the value exported to
  * user-space. This allows kernel threads to set their
  * priority to a value higher than any user task. Note:
@@ -302,7 +361,7 @@ struct signal_struct {
 #define MAX_PRIO	(MAX_RT_PRIO + 40)
 
-#define rt_task(p)	((p)->prio < MAX_RT_PRIO)
+#define rt_task(p)	(unlikely((p)->prio < MAX_RT_PRIO))
 
 /*
  * Some day this will be a full-fledged user tracking system..
 */
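For orientation: with the usual MAX_RT_PRIO of 100 this gives MAX_PRIO = 140, so prio 0..99 is the RT band that rt_task() tests and SCHED_NORMAL tasks sit at 100..139. kernel/sched.c derives the latter from the nice value roughly as (sketch, not part of this patch):

	#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
	/* nice -20 -> prio 100 (strongest), nice +19 -> prio 139 (weakest) */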
@@ -311,13 +370,23 @@
 struct user_struct {
 	atomic_t __count;	/* reference count */
 	atomic_t processes;	/* How many processes does this user have? */
 	atomic_t files;		/* How many open files does this user have? */
+	atomic_t sigpending;	/* How many pending signals does this user have? */
+	/* protected by mq_lock */
+	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
+	unsigned long locked_shm; /* How many pages of mlocked shm ? */
+
+#ifdef CONFIG_KEYS
+	struct key *uid_keyring;	/* UID specific keyring */
+	struct key *session_keyring;	/* UID's default session keyring */
+#endif
 
 	/* Hash table maintenance information */
 	struct list_head uidhash_list;
 	uid_t uid;
+	xid_t xid;
 };
 
-extern struct user_struct *find_user(uid_t);
+extern struct user_struct *find_user(xid_t, uid_t);
 
 extern struct user_struct root_user;
 #define INIT_USER (&root_user)
@@ -326,30 +395,106 @@
 typedef struct prio_array prio_array_t;
 struct backing_dev_info;
 struct reclaim_state;
 
-/* POSIX.1b interval timer structure. */
-struct k_itimer {
-	struct list_head list;		/* free/ allocate list */
-	spinlock_t it_lock;
-	clockid_t it_clock;		/* which timer type */
-	timer_t it_id;			/* timer id */
-	int it_overrun;			/* overrun on pending signal */
-	int it_overrun_last;		/* overrun on last delivered signal */
-	int it_requeue_pending;		/* waiting to requeue this timer */
-	int it_sigev_notify;		/* notify word of sigevent struct */
-	int it_sigev_signo;		/* signo word of sigevent struct */
-	sigval_t it_sigev_value;	/* value word of sigevent struct */
-	unsigned long it_incr;		/* interval specified in jiffies */
-	struct task_struct *it_process;	/* process to send signal to */
-	struct timer_list it_timer;
-	struct sigqueue *sigq;		/* signal queue entry. */
+#ifdef CONFIG_SCHEDSTATS
+struct sched_info {
+	/* cumulative counters */
+	unsigned long	cpu_time,	/* time spent on the cpu */
+			run_delay,	/* time spent waiting on a runqueue */
+			pcnt;		/* # of timeslices run on this cpu */
+
+	/* timestamps */
+	unsigned long	last_arrival,	/* when we last ran on a cpu */
+			last_queued;	/* when we were last queued to run */
 };
+
+extern struct file_operations proc_schedstat_operations;
+#endif
+
+enum idle_type
+{
+	SCHED_IDLE,
+	NOT_IDLE,
+	NEWLY_IDLE,
+	MAX_IDLE_TYPES
+};
+
+/*
+ * sched-domains (multiprocessor balancing) declarations:
+ */
+#ifdef CONFIG_SMP
+#define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */
+
+#define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
+#define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
+#define SD_BALANCE_EXEC		4	/* Balance on exec */
+#define SD_WAKE_IDLE		8	/* Wake to idle CPU on task wakeup */
+#define SD_WAKE_AFFINE		16	/* Wake task to waking CPU */
+#define SD_WAKE_BALANCE		32	/* Perform balancing at task wakeup */
+#define SD_SHARE_CPUPOWER	64	/* Domain members share cpu power */
+
+struct sched_group {
+	struct sched_group *next;	/* Must be a circular list */
+	cpumask_t cpumask;
+
+	/*
+	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
+	 * single CPU. This is read only (except for setup, hotplug CPU).
+	 */
+	unsigned long cpu_power;
+};
+
+struct sched_domain {
+	/* These fields must be setup */
+	struct sched_domain *parent;	/* top domain must be null terminated */
+	struct sched_group *groups;	/* the balancing groups of the domain */
+	cpumask_t span;			/* span of all CPUs in this domain */
+	unsigned long min_interval;	/* Minimum balance interval ms */
+	unsigned long max_interval;	/* Maximum balance interval ms */
+	unsigned int busy_factor;	/* less balancing by factor if busy */
+	unsigned int imbalance_pct;	/* No balance until over watermark */
+	unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
+	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
+	unsigned int per_cpu_gain;	/* CPU % gained by adding domain cpus */
+	int flags;			/* See SD_* */
+
+	/* Runtime fields. */
+	unsigned long last_balance;	/* init to jiffies. units in jiffies */
+	unsigned int balance_interval;	/* initialise to 1. units in ms. */
+	unsigned int nr_balance_failed;	/* initialise to 0 */
+
+#ifdef CONFIG_SCHEDSTATS
+	/* load_balance() stats */
+	unsigned long lb_cnt[MAX_IDLE_TYPES];
+	unsigned long lb_failed[MAX_IDLE_TYPES];
+	unsigned long lb_imbalance[MAX_IDLE_TYPES];
+	unsigned long lb_nobusyg[MAX_IDLE_TYPES];
+	unsigned long lb_nobusyq[MAX_IDLE_TYPES];
+
+	/* sched_balance_exec() stats */
+	unsigned long sbe_attempts;
+	unsigned long sbe_pushed;
+
+	/* try_to_wake_up() stats */
+	unsigned long ttwu_wake_affine;
+	unsigned long ttwu_wake_balance;
+#endif
+};
+
+#ifdef ARCH_HAS_SCHED_DOMAIN
+/* Useful helpers that arch setup code may use. Defined in kernel/sched.c */
+extern cpumask_t cpu_isolated_map;
+extern void init_sched_build_groups(struct sched_group groups[],
+				cpumask_t span, int (*group_fn)(int cpu));
+extern void cpu_attach_domain(struct sched_domain *sd, int cpu);
+#endif /* ARCH_HAS_SCHED_DOMAIN */
+#endif /* CONFIG_SMP */
+
 
 struct io_context;			/* See blkdev.h */
 void exit_io_context(void);
 
 #define NGROUPS_SMALL		32
-#define NGROUPS_PER_BLOCK	((int)(EXEC_PAGESIZE / sizeof(gid_t)))
+#define NGROUPS_PER_BLOCK	((int)(PAGE_SIZE / sizeof(gid_t)))
 struct group_info {
 	int ngroups;
 	atomic_t usage;
@@ -358,6 +503,12 @@
 	gid_t *blocks[0];
 };
 
+/*
+ * get_group_info() must be called with the owning task locked (via task_lock())
+ * when task != current.  The reason being that the vast majority of callers are
+ * looking at current->group_info, which can not be changed except by the
+ * current task.  Changing current->group_info requires the task lock, too.
+ */
 #define get_group_info(group_info) do { \
 	atomic_inc(&(group_info)->usage); \
 } while (0)
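The locking rule in the new comment above matters when inspecting a task other than current; a minimal sketch of a compliant caller (hypothetical task pointer, not from this patch):

	task_lock(task);		/* required when task != current */
	get_group_info(task->group_info);
	task_unlock(task);
	/* ... use the group_info, then release it with put_group_info() ... */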
@@ -376,6 +527,7 @@ int set_current_groups(struct group_info *group_info);
 
 struct audit_context;			/* See audit.c */
+struct mempolicy;
 
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
@@ -391,15 +543,22 @@
 	prio_array_t *array;
 
 	unsigned long sleep_avg;
-	long interactive_credit;
-	unsigned long long timestamp;
+	unsigned long long timestamp, last_ran;
 	int activated;
 
 	unsigned long policy;
 	cpumask_t cpus_allowed;
 	unsigned int time_slice, first_time_slice;
 
+#ifdef CONFIG_SCHEDSTATS
+	struct sched_info sched_info;
+#endif
+
 	struct list_head tasks;
+	/*
+	 * ptrace_list/ptrace_children forms the list of my children
+	 * that were stolen by a ptracer.
+	 */
 	struct list_head ptrace_children;
 	struct list_head ptrace_list;
 
@@ -407,11 +566,12 @@
 /* task state */
 	struct linux_binfmt *binfmt;
+	long exit_state;
 	int exit_code, exit_signal;
 	int pdeath_signal;	/* The signal sent when the parent dies */
 	/* ??? */
 	unsigned long personality;
-	int did_exec:1;
+	unsigned did_exec:1;
 	pid_t pid;
 	pid_t tgid;
 	/*
@@ -421,38 +581,45 @@
 	 */
 	struct task_struct *real_parent;	/* real parent process (when being debugged) */
 	struct task_struct *parent;	/* parent process */
+	/*
+	 * children/sibling forms the list of my children plus the
+	 * tasks I'm ptracing.
+	 */
 	struct list_head children;	/* list of my children */
 	struct list_head sibling;	/* linkage in my parent's children list */
 	struct task_struct *group_leader;	/* threadgroup leader */
 
 	/* PID/PID hash table linkage. */
-	struct pid_link pids[PIDTYPE_MAX];
+	struct pid pids[PIDTYPE_MAX];
 
-	wait_queue_head_t wait_chldexit;	/* for wait4() */
 	struct completion *vfork_done;	/* for vfork() */
 	int __user *set_child_tid;	/* CLONE_CHILD_SETTID */
 	int __user *clear_child_tid;	/* CLONE_CHILD_CLEARTID */
 
 	unsigned long rt_priority;
-	unsigned long it_real_value, it_prof_value, it_virt_value;
-	unsigned long it_real_incr, it_prof_incr, it_virt_incr;
+	unsigned long it_real_value, it_real_incr;
+	cputime_t it_virt_value, it_virt_incr;
+	cputime_t it_prof_value, it_prof_incr;
 	struct timer_list real_timer;
-	unsigned long utime, stime, cutime, cstime;
-	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; /* context switch counts */
-	u64 start_time;
+	cputime_t utime, stime;
+	unsigned long nvcsw, nivcsw; /* context switch counts */
+	struct timespec start_time;
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
-	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
+	unsigned long min_flt, maj_flt;
/* process credentials */
 	uid_t uid,euid,suid,fsuid;
 	gid_t gid,egid,sgid,fsgid;
 	struct group_info *group_info;
 	kernel_cap_t cap_effective, cap_inheritable, cap_permitted;
-	int keep_capabilities:1;
+	unsigned keep_capabilities:1;
 	struct user_struct *user;
-/* limits */
-	struct rlimit rlim[RLIM_NLIMITS];
-	unsigned short used_math;
-	char comm[16];
+#ifdef CONFIG_KEYS
+	struct key *session_keyring;	/* keyring inherited over fork */
+	struct key *process_keyring;	/* keyring private to this process (CLONE_THREAD) */
+	struct key *thread_keyring;	/* keyring private to this thread */
+#endif
+	int oomkilladj;	/* OOM kill score adjustment (bit shift). */
+	char comm[TASK_COMM_LEN];
/* file system info */
 	int link_count, total_link_count;
/* ipc stuff */
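With comm now sized by TASK_COMM_LEN, the copy helpers declared further down in this patch are the safe way to read it; a sketch (assumed caller, not from this patch):

	char buf[TASK_COMM_LEN];

	get_task_comm(buf, tsk);	/* bounded copy of tsk->comm */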
@@ -481,10 +648,18 @@ struct task_struct {
 	void *security;
 	struct audit_context *audit_context;
 
+/* vserver context data */
+	xid_t xid;
+	struct vx_info *vx_info;
+
+/* vserver network data */
+	nid_t nid;
+	struct nx_info *nx_info;
+
/* Thread group tracking */
 	u32 parent_exec_id;
 	u32 self_exec_id;
-/* Protection of (de-)allocation: mm, files, fs, tty */
+/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
 	spinlock_t alloc_lock;
/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
 	spinlock_t proc_lock;
@@ -504,6 +679,24 @@ struct task_struct {
 	unsigned long ptrace_message;
 	siginfo_t *last_siginfo;	/* For ptrace use. */
+/*
+ * current io wait handle: wait queue entry to use for io waits
+ * If this thread is processing aio, this points at the waitqueue
+ * inside the currently handled kiocb. It may be NULL (i.e. default
+ * to a stack based synchronous wait) if its doing sync IO.
+ */
+	wait_queue_t *io_wait;
+/* i/o counters(bytes read/written, #syscalls */
+	u64 rchar, wchar, syscr, syscw;
+#if defined(CONFIG_BSD_PROCESS_ACCT)
+	u64 acct_rss_mem1;	/* accumulated rss usage */
+	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
+	clock_t acct_stimexpd;	/* clock_t-converted stime since last update */
+#endif
+#ifdef CONFIG_NUMA
+	struct mempolicy *mempolicy;
+	short il_next;
+#endif
 };
 
 static inline pid_t process_group(struct task_struct *tsk)
@@ -511,6 +704,20 @@
 {
 	return tsk->signal->pgrp;
 }
 
+/**
+ * pid_alive - check that a task structure is not stale
+ * @p: Task structure to be checked.
+ *
+ * Test if a process is not yet dead (at most zombie state)
+ * If pid_alive fails, then pointers within the task structure
+ * can be stale and must not be dereferenced.
+ */
+static inline int pid_alive(struct task_struct *p)
+{
+	return p->pids[PIDTYPE_PID].nr != 0;
+}
+
+extern void free_task(struct task_struct *tsk);
 extern void __put_task_struct(struct task_struct *tsk);
 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
 #define put_task_struct(tsk) \
@@ -529,10 +736,9 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
 #define PF_DUMPCORE	0x00000200	/* dumped core */
 #define PF_SIGNALED	0x00000400	/* killed by a signal */
 #define PF_MEMALLOC	0x00000800	/* Allocating memory */
-#define PF_MEMDIE	0x00001000	/* Killed for out-of-memory */
-#define PF_FLUSHER	0x00002000	/* responsible for disk writeback */
-
-#define PF_FREEZE	0x00004000	/* this task should be frozen for suspend */
+#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
+#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
+#define PF_FREEZE	0x00004000	/* this task is being frozen for suspend now */
 #define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
 #define PF_FROZEN	0x00010000	/* frozen for system suspend */
 #define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
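pid_alive() above is the stale-pointer guard for task lookups; a typical pattern under tasklist_lock (sketch with a hypothetical pid number 'nr', not from this patch):

	task_t *p;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(nr);
	if (p && pid_alive(p))
		get_task_struct(p);	/* pin p before dropping the lock */
	read_unlock(&tasklist_lock);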
@@ -540,33 +746,67 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0)
 #define PF_SWAPOFF	0x00080000	/* I am in swapoff */
 #define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
 #define PF_SYNCWRITE	0x00200000	/* I am doing a sync write */
+#define PF_BORROWED_MM	0x00400000	/* I am a kthread doing use_mm */
+
+/*
+ * Only the _current_ task can read/write to tsk->flags, but other
+ * tasks can access tsk->flags in readonly mode for example
+ * with tsk_used_math (like during threaded core dumping).
+ * There is however an exception to this rule during ptrace
+ * or during fork: the ptracer task is allowed to write to the
+ * child->flags of its traced child (same goes for fork, the parent
+ * can write to the child->flags), because we're guaranteed the
+ * child is not running and in turn not changing child->flags
+ * at the same time the parent does it.
+ */
+#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
+#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
+#define clear_used_math() clear_stopped_child_used_math(current)
+#define set_used_math() set_stopped_child_used_math(current)
+#define conditional_stopped_child_used_math(condition, child) \
+	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
+#define conditional_used_math(condition) \
+	conditional_stopped_child_used_math(condition, current)
+#define copy_to_stopped_child_used_math(child) \
+	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
+/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
+#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
+#define used_math() tsk_used_math(current)
 
 #ifdef CONFIG_SMP
 extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
 #else
 static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
 {
+	if (!cpus_intersects(new_mask, cpu_online_map))
+		return -EINVAL;
 	return 0;
 }
 #endif
 
 extern unsigned long long sched_clock(void);
 
-#ifdef CONFIG_NUMA
-extern void sched_balance_exec(void);
-extern void node_nr_running_init(void);
+/* sched_exec is called by processes performing an exec */
+#ifdef CONFIG_SMP
+extern void sched_exec(void);
 #else
-#define sched_balance_exec()   {}
-#define node_nr_running_init() {}
+#define sched_exec()   {}
 #endif
 
-/* Move tasks off this (offline) CPU onto another. */
-extern void migrate_all_tasks(void);
+#ifdef CONFIG_HOTPLUG_CPU
+extern void idle_task_exit(void);
+#else
+static inline void idle_task_exit(void) {}
+#endif
+
+extern void sched_idle_next(void);
 extern void set_user_nice(task_t *p, long nice);
-extern int task_prio(task_t *p);
-extern int task_nice(task_t *p);
-extern int task_curr(task_t *p);
+extern int task_prio(const task_t *p);
+extern int task_nice(const task_t *p);
+extern int task_curr(const task_t *p);
 extern int idle_cpu(int cpu);
+extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
+extern task_t *idle_task(int cpu);
 
 void yield(void);
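Note that set_cpus_allowed() now rejects masks with no online CPU even in the UP stub above; a pinning sketch (hypothetical caller, not from this patch):

	cpumask_t mask = cpumask_of_cpu(0);	/* run on CPU 0 only */

	if (set_cpus_allowed(p, mask) < 0)
		return -EINVAL;			/* no online CPU in mask */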
@@ -595,29 +835,40 @@
 extern struct task_struct init_task;
 
 extern struct mm_struct init_mm;
 
-extern struct task_struct *find_task_by_pid(int pid);
+
+#define find_task_by_real_pid(nr) \
+	find_task_by_pid_type(PIDTYPE_PID, nr)
+#define find_task_by_pid(nr) \
+	find_task_by_pid_type(PIDTYPE_PID, \
+		vx_rmap_pid(nr))
+
+extern struct task_struct *find_task_by_pid_type(int type, int pid);
 extern void set_special_pids(pid_t session, pid_t pgrp);
 extern void __set_special_pids(pid_t session, pid_t pgrp);
 
 /* per-UID process charging. */
-extern struct user_struct * alloc_uid(uid_t);
+extern struct user_struct * alloc_uid(xid_t, uid_t);
+static inline struct user_struct *get_uid(struct user_struct *u)
+{
+	atomic_inc(&u->__count);
+	return u;
+}
 extern void free_uid(struct user_struct *);
 extern void switch_uid(struct user_struct *);
 
 #include
 
-extern unsigned long itimer_ticks;
-extern unsigned long itimer_next;
 extern void do_timer(struct pt_regs *);
 
 extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
 extern int FASTCALL(wake_up_process(struct task_struct * tsk));
+extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
+				unsigned long clone_flags));
 #ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
 #else
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk));
 extern void FASTCALL(sched_fork(task_t * p));
 extern void FASTCALL(sched_exit(task_t * p));
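The new get_uid() inline above takes a counted reference on a user_struct and pairs with free_uid(); a sketch (hypothetical caller, not from this patch):

	struct user_struct *u = get_uid(p->user);	/* __count++ */
	/* ... account something against u ... */
	free_uid(u);					/* drop the reference */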
@@ -647,12 +898,11 @@ extern void unblock_all_signals(void);
 extern void release_task(struct task_struct * p);
 extern int send_sig_info(int, struct siginfo *, struct task_struct *);
 extern int send_group_sig_info(int, struct siginfo *, struct task_struct *);
+extern int force_sigsegv(int, struct task_struct *);
 extern int force_sig_info(int, struct siginfo *, struct task_struct *);
 extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
 extern int kill_pg_info(int, struct siginfo *, pid_t);
-extern int kill_sl_info(int, struct siginfo *, pid_t);
 extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern void notify_parent(struct task_struct *, int);
 extern void do_notify_parent(struct task_struct *, int);
 extern void force_sig(int, struct task_struct *);
 extern void force_sig_specific(int, struct task_struct *);
@@ -690,15 +940,28 @@ static inline int sas_ss_flags(unsigned long sp)
 
 #ifdef CONFIG_SECURITY
 /* code is in security.c */
 extern int capable(int cap);
+extern int vx_capable(int cap, int ccap);
 #else
 static inline int capable(int cap)
 {
+	if (vx_check_bit(VXC_CAP_MASK, cap) && !vx_mcaps(1L << cap))
+		return 0;
 	if (cap_raised(current->cap_effective, cap)) {
 		current->flags |= PF_SUPERPRIV;
 		return 1;
 	}
 	return 0;
 }
+
+static inline int vx_capable(int cap, int ccap)
+{
+	if (cap_raised(current->cap_effective, cap) &&
+		vx_ccaps(ccap)) {
+		current->flags |= PF_SUPERPRIV;
+		return 1;
+	}
+	return 0;
+}
 #endif
 
 /*
@@ -716,8 +979,8 @@ static inline void mmdrop(struct mm_struct * mm)
 
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
-/* Grab a reference to the mm if its not already going away */
-extern struct mm_struct *mmgrab(struct mm_struct *);
+/* Grab a reference to a task's mm, if it is not already going away */
+extern struct mm_struct *get_task_mm(struct task_struct *task);
 /* Remove the current tasks stale references to the old mm_struct */
 extern void mm_release(struct task_struct *, struct mm_struct *);
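get_task_mm() replaces the old mmgrab(): per the (removed) kerneldoc further down, it returns NULL when the task has no mm, and the reference must be dropped with mmput(). Sketch (hypothetical caller):

	struct mm_struct *mm = get_task_mm(task);

	if (mm) {
		/* ... inspect mm ... */
		mmput(mm);	/* release the reference taken above */
	}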
@@ -743,7 +1006,10 @@ extern task_t *child_reaper;
 
 extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
 extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
-extern struct task_struct * copy_process(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
+task_t *fork_idle(int);
+
+extern void set_task_comm(struct task_struct *tsk, char *from);
+extern void get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
 extern void wait_task_inactive(task_t * p);
@@ -782,15 +1048,13 @@
 #define while_each_thread(g, t) \
 	while ((t = next_thread(t)) != g)
 
-extern task_t * FASTCALL(next_thread(task_t *p));
+extern task_t * FASTCALL(next_thread(const task_t *p));
 
 #define thread_group_leader(p)	(p->pid == p->tgid)
 
 static inline int thread_group_empty(task_t *p)
 {
-	struct pid *pid = p->pids[PIDTYPE_TGID].pidptr;
-
-	return pid->task_list.next->next == &pid->task_list;
+	return list_empty(&p->pids[PIDTYPE_TGID].pid_list);
 }
 
 #define delay_group_leader(p) \
@@ -799,7 +1063,9 @@ static inline int thread_group_empty(task_t *p)
 extern void unhash_process(struct task_struct *p);
 
 /*
- * Protects ->fs, ->files, ->mm, ->ptrace and synchronises with wait4().
+ * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
+ * subscriptions and synchronises with wait4().  Also used in procfs.
+ *
  * Nests both inside and outside of read_lock(&tasklist_lock).
  * It must not be nested with write_lock_irq(&tasklist_lock),
  * neither inside nor outside.
@@ -813,27 +1079,7 @@ static inline void task_unlock(struct task_struct *p)
 {
 	spin_unlock(&p->alloc_lock);
 }
-
-/**
- * get_task_mm - acquire a reference to the task's mm
- *
- * Returns %NULL if the task has no mm. User must release
- * the mm via mmput() after use.
- */
-static inline struct mm_struct * get_task_mm(struct task_struct * task)
-{
-	struct mm_struct * mm;
-
-	task_lock(task);
-	mm = task->mm;
-	if (mm)
-		mm = mmgrab(mm);
-	task_unlock(task);
-	return mm;
-}
-
-
+
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
@@ -882,29 +1128,36 @@ static inline int need_resched(void)
 {
 	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
 }
 
-extern void __cond_resched(void);
-static inline void cond_resched(void)
-{
-	if (need_resched())
-		__cond_resched();
-}
+/*
+ * cond_resched() and cond_resched_lock(): latency reduction via
+ * explicit rescheduling in places that are safe. The return
+ * value indicates whether a reschedule was done in fact.
+ * cond_resched_lock() will drop the spinlock before scheduling,
+ * cond_resched_softirq() will enable bhs before scheduling.
+ */
+extern int cond_resched(void);
+extern int cond_resched_lock(spinlock_t * lock);
+extern int cond_resched_softirq(void);
 
 /*
- * cond_resched_lock() - if a reschedule is pending, drop the given lock,
- * call schedule, and on return reacquire the lock.
- *
- * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
- * operations here to prevent schedule() from being called twice (once via
- * spin_unlock(), once by hand).
+ * Does a critical section need to be broken due to another
+ * task waiting?:
+ */
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+# define need_lockbreak(lock) ((lock)->break_lock)
+#else
+# define need_lockbreak(lock) 0
+#endif
+
+/*
+ * Does a critical section need to be broken due to another
+ * task waiting or preemption being signalled:
  */
-static inline void cond_resched_lock(spinlock_t * lock)
+static inline int lock_need_resched(spinlock_t *lock)
 {
-	if (need_resched()) {
-		_raw_spin_unlock(lock);
-		preempt_enable_no_resched();
-		__cond_resched();
-		spin_lock(lock);
-	}
+	if (need_lockbreak(lock) || need_resched())
+		return 1;
+	return 0;
 }
 
 /* Reevaluate whether the task has signals pending delivery.
@@ -921,7 +1174,7 @@ extern void signal_wake_up(struct task_struct *t, int resume_stopped);
 */
 #ifdef CONFIG_SMP
 
-static inline unsigned int task_cpu(struct task_struct *p)
+static inline unsigned int task_cpu(const struct task_struct *p)
 {
 	return p->thread_info->cpu;
 }
@@ -933,7 +1186,7 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
 	p->thread_info->cpu = cpu;
 }
 
 #else
 
-static inline unsigned int task_cpu(struct task_struct *p)
+static inline unsigned int task_cpu(const struct task_struct *p)
 {
 	return 0;
 }
@@ -944,6 +1197,54 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 {
 }
 
 #endif /* CONFIG_SMP */
 
+#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
+extern void arch_pick_mmap_layout(struct mm_struct *mm);
+#else
+static inline void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+	mm->mmap_base = TASK_UNMAPPED_BASE;
+	mm->get_unmapped_area = arch_get_unmapped_area;
+	mm->unmap_area = arch_unmap_area;
+}
+#endif
+
+extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
+extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
+
+#ifdef CONFIG_MAGIC_SYSRQ
+
+extern void normalize_rt_tasks(void);
+
+#endif
+
+/* try_to_freeze
+ *
+ * Checks whether we need to enter the refrigerator
+ * and returns 1 if we did so.
+ */
+#ifdef CONFIG_PM
+extern void refrigerator(unsigned long);
+extern int freeze_processes(void);
+extern void thaw_processes(void);
+
+static inline int try_to_freeze(unsigned long refrigerator_flags)
+{
+	if (unlikely(current->flags & PF_FREEZE)) {
+		refrigerator(refrigerator_flags);
+		return 1;
+	} else
+		return 0;
+}
+#else
+static inline void refrigerator(unsigned long flag) {}
+static inline int freeze_processes(void) { BUG(); return 0; }
+static inline void thaw_processes(void) {}
+
+static inline int try_to_freeze(unsigned long refrigerator_flags)
+{
+	return 0;
+}
+#endif /* CONFIG_PM */
 #endif /* __KERNEL__ */
 
 #endif
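The try_to_freeze() helper above is meant to be called from a kernel thread's main loop so the task parks in refrigerator() across suspend; a sketch of the common idiom (do_pending_work() is hypothetical, not from this patch):

	while (!kthread_should_stop()) {
		do_pending_work();
		try_to_freeze(PF_FREEZE);	/* enter refrigerator() if asked */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}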