X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Flinux%2Fmempolicy.h;fp=include%2Flinux%2Fmempolicy.h;h=f5fdca1d67e6fbab0fc5ff7785b8fec0547be76b;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=8480aef10e62fa41d986751a651ffd090db4ceb7;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git

diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 8480aef10..f5fdca1d6 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -22,17 +22,21 @@
 
 /* Flags for mbind */
 #define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
+#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
+#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
+#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */
 
 #ifdef __KERNEL__
 
 #include <linux/config.h>
 #include <linux/mmzone.h>
-#include <linux/gfp.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
+#include <linux/nodemask.h>
 
 struct vm_area_struct;
+struct mm_struct;
 
 #ifdef CONFIG_NUMA
 
@@ -47,8 +51,7 @@ struct vm_area_struct;
  * Locking policy for interlave:
  * In process context there is no locking because only the process accesses
  * its own state. All vma manipulation is somewhat protected by a down_read on
- * mmap_sem. For allocating in the interleave policy the page_table_lock
- * must be also aquired to protect il_next.
+ * mmap_sem.
  *
  * Freeing policy:
  * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
@@ -63,9 +66,10 @@ struct mempolicy {
 	union {
 		struct zonelist  *zonelist;	/* bind */
 		short 		 preferred_node; /* preferred */
-		DECLARE_BITMAP(nodes, MAX_NUMNODES); /* interleave */
+		nodemask_t	 nodes;	/* interleave */
 		/* undefined for default */
 	} v;
+	nodemask_t cpuset_mems_allowed;	/* mempolicy relative to these nodes */
 };
 
 /*
@@ -110,14 +114,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
 
 #define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
 
-/*
- * Hugetlb policy. i386 hugetlb so far works with node numbers
- * instead of zone lists, so give it special interfaces for now.
- */
-extern int mpol_first_node(struct vm_area_struct *vma, unsigned long addr);
-extern int mpol_node_valid(int nid, struct vm_area_struct *vma,
-		unsigned long addr);
-
 /*
  * Tree of shared policies for a shared memory region.
  * Maintain the policies in a pseudo mm that contains vmas. The vmas
@@ -137,12 +133,8 @@ struct shared_policy {
 	spinlock_t lock;
 };
 
-static inline void mpol_shared_policy_init(struct shared_policy *info)
-{
-	info->root = RB_ROOT;
-	spin_lock_init(&info->lock);
-}
-
+void mpol_shared_policy_init(struct shared_policy *info, int policy,
+				nodemask_t *nodes);
 int mpol_set_shared_policy(struct shared_policy *info,
 				struct vm_area_struct *vma,
 				struct mempolicy *new);
@@ -152,6 +144,37 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
 
 extern void numa_default_policy(void);
 extern void numa_policy_init(void);
+extern void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *new);
+extern void mpol_rebind_task(struct task_struct *tsk,
+					const nodemask_t *new);
+extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
+extern void mpol_fix_fork_child_flag(struct task_struct *p);
+#define set_cpuset_being_rebound(x) (cpuset_being_rebound = (x))
+
+#ifdef CONFIG_CPUSET
+#define current_cpuset_is_being_rebound() \
+				(cpuset_being_rebound == current->cpuset)
+#else
+#define current_cpuset_is_being_rebound() 0
+#endif
+
+extern struct mempolicy default_policy;
+extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+		unsigned long addr);
+extern unsigned slab_node(struct mempolicy *policy);
+
+extern int policy_zone;
+
+static inline void check_highest_zone(int k)
+{
+	if (k > policy_zone)
+		policy_zone = k;
+}
+
+int do_migrate_pages(struct mm_struct *mm,
+	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
+
+extern void *cpuset_being_rebound;	/* Trigger mpol_copy vma rebind */
 
 #else
 
@@ -178,17 +201,6 @@ static inline struct mempolicy *mpol_copy(struct mempolicy *old)
 	return NULL;
 }
 
-static inline int mpol_first_node(struct vm_area_struct *vma, unsigned long a)
-{
-	return numa_node_id();
-}
-
-static inline int
-mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long a)
-{
-	return 1;
-}
-
 struct shared_policy {};
 
 static inline int mpol_set_shared_policy(struct shared_policy *info,
@@ -198,7 +210,8 @@ static inline int mpol_set_shared_policy(struct shared_policy *info,
 	return -EINVAL;
 }
 
-static inline void mpol_shared_policy_init(struct shared_policy *info)
+static inline void mpol_shared_policy_init(struct shared_policy *info,
+					int policy, nodemask_t *nodes)
 {
 }
 
@@ -223,6 +236,42 @@ static inline void numa_default_policy(void)
 {
 }
 
+static inline void mpol_rebind_policy(struct mempolicy *pol,
+					const nodemask_t *new)
+{
+}
+
+static inline void mpol_rebind_task(struct task_struct *tsk,
+					const nodemask_t *new)
+{
+}
+
+static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
+{
+}
+
+static inline void mpol_fix_fork_child_flag(struct task_struct *p)
+{
+}
+
+#define set_cpuset_being_rebound(x) do {} while (0)
+
+static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
+		unsigned long addr)
+{
+	return NODE_DATA(0)->node_zonelists + gfp_zone(GFP_HIGHUSER);
+}
+
+static inline int do_migrate_pages(struct mm_struct *mm,
+			const nodemask_t *from_nodes,
+			const nodemask_t *to_nodes, int flags)
+{
+	return 0;
+}
+
+static inline void check_highest_zone(int k)
+{
+}
 #endif /* CONFIG_NUMA */
 #endif /* __KERNEL__ */
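
The first hunk above adds page-migration flags to the mbind() uAPI: with MPOL_MF_MOVE, mbind() not only applies a policy to a range but also migrates the caller's already-faulted pages so they conform to it. A minimal userspace sketch follows, invoking the raw syscall so it does not depend on libnuma; the mapping size, the choice of node 0, and the error handling are illustrative assumptions, not part of the patch. The MPOL_BIND and MPOL_MF_MOVE values are taken from the header this diff modifies.

/* Hedged sketch: exercising the new MPOL_MF_MOVE flag from userspace.
 * Assumes a NUMA-enabled kernel carrying this patch. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef MPOL_BIND
#define MPOL_BIND	2		/* from <linux/mempolicy.h> */
#endif
#ifndef MPOL_MF_MOVE
#define MPOL_MF_MOVE	(1<<1)		/* move pages owned by this process */
#endif

int main(void)
{
	size_t len = 4096 * 16;		/* arbitrary 16-page region */
	unsigned long nodemask = 1UL << 0;	/* node 0 only */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 0, len);	/* fault pages in so there is something to move */

	/* Bind the range to node 0 and migrate already-faulted pages too. */
	if (syscall(SYS_mbind, buf, len, MPOL_BIND, &nodemask,
		    sizeof(nodemask) * 8, MPOL_MF_MOVE) != 0)
		perror("mbind");

	munmap(buf, len);
	return 0;
}

Without MPOL_MF_MOVE the call would only constrain future allocations (or fail under MPOL_MF_STRICT if existing pages violate the policy); MPOL_MF_MOVE_ALL additionally moves pages shared with other processes and, per the migration patches of this era, requires extra privilege.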