X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Flinux%2Fmempolicy.h;h=daabb3aa1ec6b96b5078f6236d2df87fbfe710f6;hb=refs%2Fheads%2Fvserver;hp=2aeecaf7145b5179d6f4f9a1300bd167e2f4830d;hpb=e812ccbe0c915857ebea6a632bfadc631f7504a9;p=linux-2.6.git

diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 2aeecaf71..daabb3aa1 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -22,17 +22,20 @@
 
 /* Flags for mbind */
 #define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
+#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
+#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
+#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
 
 #ifdef __KERNEL__
 
-#include <linux/config.h>
 #include <linux/mmzone.h>
-#include <linux/bitmap.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
-#include <asm/semaphore.h>
+#include <linux/spinlock.h>
+#include <linux/nodemask.h>
 
 struct vm_area_struct;
+struct mm_struct;
 
 #ifdef CONFIG_NUMA
 
@@ -47,8 +50,7 @@ struct vm_area_struct;
  * Locking policy for interlave:
  * In process context there is no locking because only the process accesses
  * its own state. All vma manipulation is somewhat protected by a down_read on
- * mmap_sem. For allocating in the interleave policy the page_table_lock
- * must be also aquired to protect il_next.
+ * mmap_sem.
  *
  * Freeing policy:
  * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
@@ -63,14 +65,12 @@ struct mempolicy {
 	union {
 		struct zonelist  *zonelist;	/* bind */
 		short 		 preferred_node; /* preferred */
-		DECLARE_BITMAP(nodes, MAX_NUMNODES); /* interleave */
+		nodemask_t	 nodes;	/* interleave */
 		/* undefined for default */
 	} v;
+	nodemask_t cpuset_mems_allowed;	/* mempolicy relative to these nodes */
 };
 
-/* An NULL mempolicy pointer is a synonym of &default_policy. */
-extern struct mempolicy default_policy;
-
 /*
  * Support for managing mempolicy data objects (clone, copy, destroy)
  * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
@@ -113,14 +113,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
 
 #define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
 
-/*
- * Hugetlb policy. i386 hugetlb so far works with node numbers
- * instead of zone lists, so give it special interfaces for now.
- */
-extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr);
-extern int mpol_node_valid(int nid, struct vm_area_struct *vma,
-		unsigned long addr);
-
 /*
  * Tree of shared policies for a shared memory region.
  * Maintain the policies in a pseudo mm that contains vmas. The vmas
@@ -137,15 +129,11 @@ struct sp_node {
 
 struct shared_policy {
 	struct rb_root root;
-	struct semaphore sem;
+	spinlock_t lock;
 };
 
-static inline void mpol_shared_policy_init(struct shared_policy *info)
-{
-	info->root = RB_ROOT;
-	init_MUTEX(&info->sem);
-}
-
+void mpol_shared_policy_init(struct shared_policy *info, int policy,
+				nodemask_t *nodes);
 int mpol_set_shared_policy(struct shared_policy *info,
 				struct vm_area_struct *vma,
 				struct mempolicy *new);
@@ -153,6 +141,40 @@ void mpol_free_shared_policy(struct shared_policy *p);
 struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
 					    unsigned long idx);
 
+extern void numa_default_policy(void);
+extern void numa_policy_init(void);
+extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
+extern void mpol_rebind_task(struct task_struct *tsk,
+					const nodemask_t *new);
+extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
+extern void mpol_fix_fork_child_flag(struct task_struct *p);
+#define set_cpuset_being_rebound(x) (cpuset_being_rebound = (x))
+
+#ifdef CONFIG_CPUSETS
+#define current_cpuset_is_being_rebound() \
+			(cpuset_being_rebound == current->cpuset)
+#else
+#define current_cpuset_is_being_rebound() 0
+#endif
+
+extern struct mempolicy default_policy;
+extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+		unsigned long addr);
+extern unsigned slab_node(struct mempolicy *policy);
+
+extern enum zone_type policy_zone;
+
+static inline void check_highest_zone(enum zone_type k)
+{
+	if (k > policy_zone)
+		policy_zone = k;
+}
+
+int do_migrate_pages(struct mm_struct *mm,
+	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
+
+extern void *cpuset_being_rebound;	/* Trigger mpol_copy vma rebind */
+
 #else
 
 struct mempolicy {};
@@ -178,17 +200,6 @@ static inline struct mempolicy *mpol_copy(struct mempolicy *old)
 	return NULL;
 }
 
-static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a)
-{
-	return numa_node_id();
-}
-
-static inline int
-mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a)
-{
-	return 1;
-}
-
 struct shared_policy {};
 
 static inline int mpol_set_shared_policy(struct shared_policy *info,
@@ -198,7 +209,8 @@ static inline int mpol_set_shared_policy(struct shared_policy *info,
 	return -EINVAL;
 }
 
-static inline void mpol_shared_policy_init(struct shared_policy *info)
+static inline void mpol_shared_policy_init(struct shared_policy *info,
+					int policy, nodemask_t *nodes)
 {
 }
 
@@ -215,6 +227,50 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
 #define vma_policy(vma) NULL
 #define vma_set_policy(vma, pol) do {} while(0)
 
+static inline void numa_policy_init(void)
+{
+}
+
+static inline void numa_default_policy(void)
+{
+}
+
+static inline void mpol_rebind_policy(struct mempolicy *pol,
+					const nodemask_t *new)
+{
+}
+
+static inline void mpol_rebind_task(struct task_struct *tsk,
+					const nodemask_t *new)
+{
+}
+
+static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
+{
+}
+
+static inline void mpol_fix_fork_child_flag(struct task_struct *p)
+{
+}
+
+#define set_cpuset_being_rebound(x) do {} while (0)
+
+static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+		unsigned long addr)
+{
+	return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
+}
+
+static inline int do_migrate_pages(struct mm_struct *mm,
+			const nodemask_t *from_nodes,
+			const nodemask_t *to_nodes, int flags)
+{
+	return 0;
+}
+
+static inline void check_highest_zone(int k)
+{
+}
 #endif /* CONFIG_NUMA */
 #endif /* __KERNEL__ */
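
The new MPOL_MF_MOVE / MPOL_MF_MOVE_ALL flags above extend mbind(2) so that pages
already present in a mapping can be migrated to match the new policy, instead of
the policy applying only to future faults. A minimal userspace sketch of how
MPOL_MF_MOVE might be exercised; the mapping size, the node-0 target, and the raw
syscall(__NR_mbind, ...) invocation are illustrative assumptions, not part of the
patch:

/*
 * Sketch: bind an anonymous mapping to node 0 and ask the kernel to
 * migrate the pages the process has already touched (MPOL_MF_MOVE).
 * MPOL_* values mirror the header above; a NUMA kernel with this
 * diff applied is assumed.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define MPOL_BIND	2		/* from <linux/mempolicy.h> */
#define MPOL_MF_STRICT	(1 << 0)
#define MPOL_MF_MOVE	(1 << 1)	/* new in this diff */

int main(void)
{
	size_t len = 16 * 4096;
	unsigned long nodemask = 1UL << 0;	/* node 0 only */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, len);	/* fault the pages in somewhere first */

	/* glibc of this era had no mbind() wrapper; call it directly. */
	if (syscall(__NR_mbind, p, len, MPOL_BIND, &nodemask,
		    sizeof(nodemask) * 8, MPOL_MF_MOVE | MPOL_MF_STRICT) < 0)
		perror("mbind");

	munmap(p, len);
	return 0;
}

MPOL_MF_MOVE migrates only pages used exclusively by the calling process;
MPOL_MF_MOVE_ALL also moves shared pages and requires CAP_SYS_NICE. Combined
with MPOL_MF_STRICT, mbind() reports an error if some pages could not be moved.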
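The shared-policy side changes in step: struct shared_policy trades its semaphore
for a spinlock, and mpol_shared_policy_init() is no longer an inline that merely
resets the rb-tree; it now takes an initial policy and nodemask so that shared
memory objects (tmpfs-style) can start out with a caller-specified policy. A
hypothetical kernel-side caller, sketched under that assumption (the
mount_policy/mount_nodes names are illustrative, not from the patch):

/*
 * Hypothetical caller of the new interface.  The old inline body it
 * replaces was: info->root = RB_ROOT; init_MUTEX(&info->sem);
 */
#include <linux/mempolicy.h>

static void example_inode_init(struct shared_policy *sp,
			       int mount_policy, nodemask_t *mount_nodes)
{
	/* Seed the per-object policy tree from, e.g., mount options. */
	mpol_shared_policy_init(sp, mount_policy, mount_nodes);
}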