#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
-#include <linux/nodemask.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/mempolicy.h>
-#include <asm/tlbflush.h>
#include <asm/uaccess.h>
static kmem_cache_t *policy_cache;
static int nodes_online(unsigned long *nodes)
{
DECLARE_BITMAP(online2, MAX_NUMNODES);
- bitmap_copy(online2, nodes_addr(node_online_map), MAX_NUMNODES);
+ bitmap_copy(online2, node_online_map, MAX_NUMNODES);
if (bitmap_empty(online2, MAX_NUMNODES))
set_bit(0, online2);
if (!bitmap_subset(nodes, online2, MAX_NUMNODES))
case MPOL_PREFERRED:
/* or use current node instead of online map? */
if (p->v.preferred_node < 0)
- bitmap_copy(nodes, nodes_addr(node_online_map), MAX_NUMNODES);
+ bitmap_copy(nodes, node_online_map, MAX_NUMNODES);
else
__set_bit(p->v.preferred_node, nodes);
break;
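/* Note: a negative preferred_node is the "local allocation" case, so the
   node mask reported back to user space presumably has to cover every online
   node rather than pin a single one; hence the open question in the comment
   above. */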
#ifdef CONFIG_COMPAT
-asmlinkage long compat_sys_get_mempolicy(int __user *policy,
+asmlinkage long compat_get_mempolicy(int __user *policy,
compat_ulong_t __user *nmask,
compat_ulong_t maxnode,
compat_ulong_t addr, compat_ulong_t flags)
return err;
}
-asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask,
+asmlinkage long compat_set_mempolicy(int mode, compat_ulong_t __user *nmask,
compat_ulong_t maxnode)
{
long err = 0;
return sys_set_mempolicy(mode, nm, nr_bits+1);
}
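/*
 * For orientation, a minimal sketch of the mask rewriting these compat
 * wrappers are assumed to perform, modelled on the generic mainline helpers
 * compat_get_bitmap() and compat_alloc_user_space(); the function name below
 * is illustrative and the real body is not part of this excerpt.
 */
static long compat_mempolicy_mask_sketch(int mode, compat_ulong_t __user *nmask,
					 compat_ulong_t maxnode)
{
	DECLARE_BITMAP(bm, MAX_NUMNODES);
	unsigned long __user *nm = NULL;
	unsigned long nr_bits, alloc_size;
	long err = 0;

	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;

	if (nmask) {
		/* pull the compat_ulong_t mask in as a native bitmap ... */
		err = compat_get_bitmap(bm, nmask, nr_bits);
		/* ... and push it back out to scratch space on the user stack,
		   so the native syscall can consume it unchanged */
		nm = compat_alloc_user_space(alloc_size);
		err |= copy_to_user(nm, bm, alloc_size);
	}
	if (err)
		return -EFAULT;
	return sys_set_mempolicy(mode, nm, nr_bits + 1);
}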
-asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
+asmlinkage long compat_mbind(compat_ulong_t start, compat_ulong_t len,
compat_ulong_t mode, compat_ulong_t __user *nmask,
compat_ulong_t maxnode, compat_ulong_t flags)
{
struct zonelist *zl;
struct page *page;
- BUG_ON(!node_online(nid));
+ BUG_ON(!test_bit(nid, node_online_map));
zl = NODE_DATA(nid)->node_zonelists + (gfp & GFP_ZONEMASK);
page = __alloc_pages(gfp, order, zl);
if (page && page_zone(page) == zl->zones[0]) {
*
* Remember policies even when nobody has shared memory mapped.
* The policies are kept in a Red-Black tree linked from the inode.
- * They are protected by the sp->lock spinlock, which should be held
+ * They are protected by the sp->sem semaphore, which should be held
* for any accesses to the tree.
*/
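/*
 * For reference, a minimal sketch of the two structures that comment
 * describes, assuming the semaphore-based layout used in this version; the
 * authoritative definitions live in include/linux/mempolicy.h.
 */
struct sp_node {
	struct rb_node nd;		/* link in shared_policy.root */
	unsigned long start, end;	/* [start, end) page range covered */
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;		/* rbtree of sp_node, ordered by range */
	struct semaphore sem;		/* held for any access to the tree */
};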
/* lookup first element intersecting start-end */
-/* Caller holds sp->lock */
+/* Caller holds sp->sem */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
while (n) {
struct sp_node *p = rb_entry(n, struct sp_node, nd);
-
- if (start >= p->end)
+ if (start >= p->end) {
n = n->rb_right;
- else if (end <= p->start)
+ } else if (end < p->start) {
n = n->rb_left;
- else
+ } else {
break;
+ }
}
if (!n)
return NULL;
}
/* Insert a new shared policy into the list. */
-/* Caller holds sp->lock */
+/* Caller holds sp->sem */
static void sp_insert(struct shared_policy *sp, struct sp_node *new)
{
struct rb_node **p = &sp->root.rb_node;
struct mempolicy *pol = NULL;
struct sp_node *sn;
- if (!sp->root.rb_node)
- return NULL;
- spin_lock(&sp->lock);
+ down(&sp->sem);
sn = sp_lookup(sp, idx, idx+1);
if (sn) {
mpol_get(sn->policy);
pol = sn->policy;
}
- spin_unlock(&sp->lock);
+ up(&sp->sem);
return pol;
}
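/*
 * Usage note, implied by the mpol_get() above: a successful lookup hands the
 * policy back with an extra reference, which the caller drops again with
 * mpol_free() once its allocation is done.  A NULL return means no shared
 * policy covers this index and the default policy applies; mpol_free(NULL)
 * is a no-op, so callers can free unconditionally.
 */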
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
unsigned long end, struct sp_node *new)
{
- struct sp_node *n, *new2 = NULL;
+ struct sp_node *n, *new2;
-restart:
- spin_lock(&sp->lock);
+ down(&sp->sem);
n = sp_lookup(sp, start, end);
/* Take care of old policies in the same range. */
while (n && n->start < end) {
} else {
/* Old policy spanning whole new range. */
if (n->end > end) {
+ new2 = sp_alloc(end, n->end, n->policy);
if (!new2) {
- spin_unlock(&sp->lock);
- new2 = sp_alloc(end, n->end, n->policy);
- if (!new2)
- return -ENOMEM;
- goto restart;
+ up(&sp->sem);
+ return -ENOMEM;
}
n->end = end;
sp_insert(sp, new2);
- new2 = NULL;
}
/* Old crossing beginning, but not end (easy) */
if (n->start < start && n->end > start)
}
if (new)
sp_insert(sp, new);
- spin_unlock(&sp->lock);
- if (new2) {
- mpol_free(new2->policy);
- kmem_cache_free(sn_cache, new2);
- }
+ up(&sp->sem);
return 0;
}
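/*
 * Design note: sp->sem is a sleeping lock, so sp_alloc() (which may sleep in
 * kmem_cache_alloc) can safely be called with it held.  That is what lets the
 * replace path above allocate new2 directly under the lock, instead of the
 * unlock, allocate and restart dance required by the spinlock variant it
 * replaces.
 */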
struct sp_node *n;
struct rb_node *next;
- if (!p->root.rb_node)
- return;
- spin_lock(&p->lock);
+ down(&p->sem);
next = rb_first(&p->root);
while (next) {
n = rb_entry(next, struct sp_node, nd);
mpol_free(n->policy);
kmem_cache_free(sn_cache, n);
}
- spin_unlock(&p->lock);
+ up(&p->sem);
}
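/*
 * Tear-down: this walk is assumed to unlink each node with rb_erase() and to
 * advance via rb_next(&n->nd) (in lines not shown here) before dropping the
 * policy reference and returning the node to sn_cache, leaving an empty tree
 * by the time the semaphore is released.
 */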
/* assumes fs == KERNEL_DS */
/* Set interleaving policy for system init. This way not all
the data structures allocated at system boot end up in node zero. */
- if (sys_set_mempolicy(MPOL_INTERLEAVE, nodes_addr(node_online_map),
- MAX_NUMNODES) < 0)
+ if (sys_set_mempolicy(MPOL_INTERLEAVE, node_online_map, MAX_NUMNODES) < 0)
printk("numa_policy_init: interleaving failed\n");
}
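/*
 * Effect of the call above: with MPOL_INTERLEAVE over the whole online map,
 * successive boot-time allocations rotate across the online nodes (0, 1, 2,
 * ..., wrapping) instead of all landing on node 0.
 */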