/*
 * Fast Userspace Mutexes (which I call "Futexes!").
 * (C) Rusty Russell, IBM 2002
 *
 * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 * (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 * Removed page pinning, fix privately mapped COW pages and other cleanups
 * (C) Copyright 2003 Jamie Lokier
 *
 * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 * enough at me, Linus for the original (flawed) idea, Matthew
 * Kirkwood for proof-of-concept implementation.
 *
 * "The futexes are also cursed."
 * "But they come in a choice of three flavours!"
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>

#define FUTEX_HASHBITS 8

/*
 * Futexes are matched on equal values of this key.
 * The key type depends on whether it's a shared or private mapping.
 * Don't rearrange members without looking at hash_futex().
 *
 * offset is aligned to a multiple of sizeof(u32) (== 4) by definition.
 * We set bit 0 to indicate if it's an inode-based key.
 */
union futex_key {
        struct {
                unsigned long pgoff;
                struct inode *inode;
                int offset;
        } shared;
        struct {
                unsigned long uaddr;
                struct mm_struct *mm;
                int offset;
        } private;
        struct {
                unsigned long word;
                void *ptr;
                int offset;
        } both;
};

/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when list_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiters, then make the second condition true.
 */
struct futex_q {
        struct list_head list;
        wait_queue_head_t waiters;

        /* Which hash list lock to use. */
        spinlock_t *lock_ptr;

        /* Key which the futex is hashed on. */
        union futex_key key;

        /* For fd, sigio sent using these. */
        int fd;
        struct file *filp;
};
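
/*
 * Concretely, the wake-up ordering described above interleaves with a
 * waiter like this (an illustrative timeline, not extra protocol):
 *
 *      waker (wake_futex)              waiter (futex_wait/unqueue_me)
 *      ------------------              ------------------------------
 *      list_del_init(&q->list)         sees !list_empty() -> sleeps
 *      wake_up_all(&q->waiters)        wakes up
 *      q->lock_ptr = NULL              sees list_empty() or
 *                                      lock_ptr == NULL: it is "woken"
 *
 * A waiter observing either condition knows the waker has committed to
 * waking it and will never touch this futex_q again.
 */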

/*
 * Split the global futex_lock into every hash list lock.
 */
struct futex_hash_bucket {
        spinlock_t lock;
        unsigned int nqueued;
        struct list_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/* Futex-fs vfsmount entry: */
static struct vfsmount *futex_mnt;

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
        u32 hash = jhash2((u32*)&key->both.word,
                          (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
                          key->both.offset);
        return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}
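
/*
 * jhash2() hashes the first two words of the key (word + ptr) and mixes
 * in the offset as the initial value, so all three fields that
 * match_futex() compares also take part in selecting the bucket.
 */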

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
        return (key1->both.word == key2->both.word
                && key1->both.ptr == key2->both.ptr
                && key1->both.offset == key2->both.offset);
}

/*
 * Get parameters which are the keys for a futex.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * Returns: 0, or negative error code.
 * The key words are stored in *key on success.
 *
 * Should be called with &current->mm->mmap_sem but NOT any spinlocks.
 */
static int get_futex_key(unsigned long uaddr, union futex_key *key)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct page *page;
        int err;

        /*
         * The futex address must be "naturally" aligned.
         */
        key->both.offset = uaddr % PAGE_SIZE;
        if (unlikely((key->both.offset % sizeof(u32)) != 0))
                return -EINVAL;
        uaddr -= key->both.offset;

        /*
         * The futex is hashed differently depending on whether
         * it's in a shared or private mapping.  So check vma first.
         */
        vma = find_extend_vma(mm, uaddr);
        if (unlikely(!vma))
                return -EFAULT;

        /*
         * Permissions.
         */
        if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
                return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;

        /*
         * Private mappings are handled in a simple way.
         *
         * NOTE: When userspace waits on a MAP_SHARED mapping, even if
         * it's a read-only handle, it's expected that futexes attach to
         * the object not the particular process.  Therefore we use
         * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
         * mappings of _writable_ handles.
         */
        if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
                key->private.mm = mm;
                key->private.uaddr = uaddr;
                return 0;
        }

        /*
         * Linear file mappings are also simple.
         */
        key->shared.inode = vma->vm_file->f_dentry->d_inode;
        key->both.offset++; /* Bit 0 of offset indicates inode-based key. */
        if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
                key->shared.pgoff = (((uaddr - vma->vm_start) >> PAGE_SHIFT)
                                     + vma->vm_pgoff);
                return 0;
        }

        /*
         * We could walk the page table to read the non-linear
         * pte, and get the page index without fetching the page
         * from swap.  But that's a lot of code to duplicate here
         * for a rare case, so we simply fetch the page.
         */

        /*
         * Do a quick atomic lookup first - this is the fastpath.
         */
        spin_lock(&current->mm->page_table_lock);
        page = follow_page(mm, uaddr, 0);
        if (likely(page != NULL)) {
                key->shared.pgoff =
                        page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
                spin_unlock(&current->mm->page_table_lock);
                return 0;
        }
        spin_unlock(&current->mm->page_table_lock);

        /*
         * Do it the general way.
         */
        err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL);
        if (err >= 0) {
                key->shared.pgoff =
                        page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
                put_page(page);
                return 0;
        }
        return err;
}
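
/*
 * For example, two processes that mmap() the same file MAP_SHARED at
 * different virtual addresses still produce matching keys, because the
 * shared key is (inode, pgoff, offset|1) and never mentions the virtual
 * address.  A MAP_PRIVATE futex instead keys on (mm, uaddr, offset), so
 * only threads sharing that mm can ever match it.
 */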

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 * NOTE: mmap_sem MUST be held between get_futex_key() and calling this
 * function, if it is called at all.  mmap_sem keeps key->shared.inode valid.
 */
static inline void get_key_refs(union futex_key *key)
{
        if (key->both.ptr != 0) {
                if (key->both.offset & 1) /* inode-based key */
                        atomic_inc(&key->shared.inode->i_count);
                else /* mm-based key */
                        atomic_inc(&key->private.mm->mm_count);
        }
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_key_refs(union futex_key *key)
{
        if (key->both.ptr != 0) {
                if (key->both.offset & 1)
                        iput(key->shared.inode);
                else
                        mmdrop(key->private.mm);
        }
}

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
        list_del_init(&q->list);
        if (q->filp)
                send_sigio(&q->filp->f_owner, q->fd, POLL_IN);
        /*
         * The lock in wake_up_all() is a crucial memory barrier after the
         * list_del_init() and also before assigning to q->lock_ptr.
         */
        wake_up_all(&q->waiters);
        /*
         * The waiting task can free the futex_q as soon as this is written,
         * without taking any locks.  This must come last.
         */
        q->lock_ptr = NULL;
}

/*
 * Wake up all waiters hashed on the physical page that is mapped
 * to this virtual address:
 */
static int futex_wake(unsigned long uaddr, int nr_wake)
{
        union futex_key key;
        struct futex_hash_bucket *bh;
        struct list_head *head;
        struct futex_q *this, *next;
        int ret;

        down_read(&current->mm->mmap_sem);

        ret = get_futex_key(uaddr, &key);
        if (unlikely(ret != 0))
                goto out;

        bh = hash_futex(&key);
        spin_lock(&bh->lock);
        head = &bh->chain;

        list_for_each_entry_safe(this, next, head, list) {
                if (match_futex (&this->key, &key)) {
                        wake_futex(this);
                        if (++ret >= nr_wake)
                                break;
                }
        }

        spin_unlock(&bh->lock);
out:
        up_read(&current->mm->mmap_sem);
        return ret;
}
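
/*
 * Callers typically pass nr_wake == 1 (wake a single waiter, as on a
 * mutex unlock) or nr_wake == INT_MAX (wake everyone, as on a
 * broadcast); the return value is the number of waiters actually woken.
 */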

/*
 * Requeue all waiters hashed on one physical page to another
 * physical page.
 */
static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2,
                         int nr_wake, int nr_requeue, int *valp)
{
        union futex_key key1, key2;
        struct futex_hash_bucket *bh1, *bh2;
        struct list_head *head1;
        struct futex_q *this, *next;
        int ret, drop_count = 0;
        unsigned int nqueued;

        down_read(&current->mm->mmap_sem);

        ret = get_futex_key(uaddr1, &key1);
        if (unlikely(ret != 0))
                goto out;
        ret = get_futex_key(uaddr2, &key2);
        if (unlikely(ret != 0))
                goto out;

        bh1 = hash_futex(&key1);
        bh2 = hash_futex(&key2);

        nqueued = bh1->nqueued;
        if (likely(valp != NULL)) {
                int curval;

                /*
                 * In order to avoid doing get_user() while holding
                 * bh1->lock and bh2->lock, nqueued (a monotonically
                 * increasing field) must be read first, then *uaddr1
                 * fetched from userland, and after acquiring the locks
                 * the nqueued field compared with the stored value.
                 * The smp_mb() below makes sure that bh1->nqueued is
                 * read from memory before *uaddr1.
                 */
                smp_mb();

                if (get_user(curval, (int *)uaddr1) != 0) {
                        ret = -EFAULT;
                        goto out;
                }
                if (curval != *valp) {
                        ret = -EAGAIN;
                        goto out;
                }
        }

        if (bh1 < bh2) {
                spin_lock(&bh1->lock);
                spin_lock(&bh2->lock);
        } else {
                spin_lock(&bh2->lock);
                if (bh1 > bh2)
                        spin_lock(&bh1->lock);
        }

        if (unlikely(nqueued != bh1->nqueued && valp != NULL)) {
                ret = -EAGAIN;
                goto out_unlock;
        }

        head1 = &bh1->chain;
        list_for_each_entry_safe(this, next, head1, list) {
                if (!match_futex (&this->key, &key1))
                        continue;
                if (++ret <= nr_wake) {
                        wake_futex(this);
                } else {
                        list_move_tail(&this->list, &bh2->chain);
                        this->lock_ptr = &bh2->lock;
                        this->key = key2;
                        get_key_refs(&key2);
                        drop_count++;

                        if (ret - nr_wake >= nr_requeue)
                                break;
                        /* Make sure to stop if key1 == key2 */
                        if (head1 == &bh2->chain && head1 != &next->list)
                                head1 = &this->list;
                }
        }

out_unlock:
        spin_unlock(&bh1->lock);
        if (bh1 != bh2)
                spin_unlock(&bh2->lock);

        /* drop_key_refs() must be called outside the spinlocks. */
        while (--drop_count >= 0)
                drop_key_refs(&key1);

out:
        up_read(&current->mm->mmap_sem);
        return ret;
}
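
/*
 * The point of requeueing (e.g. for a condvar broadcast built on
 * futexes): waking all waiters at once would make every one of them
 * immediately contend on the mutex futex.  Requeueing wakes nr_wake of
 * them and silently moves up to nr_requeue of the rest onto the mutex's
 * key, to be woken one at a time as the mutex is released.
 */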

/*
 * queue_me and unqueue_me must be called as a pair, each exactly once;
 * each takes the hash bucket spinlock itself.
 */

/* The key must be already stored in q->key. */
static void queue_me(struct futex_q *q, int fd, struct file *filp)
{
        struct futex_hash_bucket *bh;

        q->fd = fd;
        q->filp = filp;

        init_waitqueue_head(&q->waiters);

        get_key_refs(&q->key);
        bh = hash_futex(&q->key);
        q->lock_ptr = &bh->lock;

        spin_lock(&bh->lock);
        bh->nqueued++;
        list_add_tail(&q->list, &bh->chain);
        spin_unlock(&bh->lock);
}

/* Return 1 if we were still queued (ie. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
        int ret = 0;
        spinlock_t *lock_ptr;

        /* In the common case we don't take the spinlock, which is nice. */
retry:
        lock_ptr = q->lock_ptr;
        if (lock_ptr != 0) {
                spin_lock(lock_ptr);
                /*
                 * q->lock_ptr can change between reading it and
                 * spin_lock(), causing us to take the wrong lock.  This
                 * corrects the race condition.
                 *
                 * Reasoning goes like this: if we have the wrong lock,
                 * q->lock_ptr must have changed (maybe several times)
                 * between reading it and the spin_lock().  It can
                 * change again after the spin_lock() but only if it was
                 * already changed before the spin_lock().  It cannot,
                 * however, change back to the original value.  Therefore
                 * we can detect whether we acquired the correct lock.
                 */
                if (unlikely(lock_ptr != q->lock_ptr)) {
                        spin_unlock(lock_ptr);
                        goto retry;
                }
                WARN_ON(list_empty(&q->list));
                list_del(&q->list);
                spin_unlock(lock_ptr);
                ret = 1;
        }

        drop_key_refs(&q->key);
        return ret;
}
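
/*
 * A concrete run of the retry logic above: the waiter loads lock_ptr
 * (bucket A's lock); a concurrent futex_requeue() then moves q to
 * bucket B and sets q->lock_ptr = &B->lock; the waiter's spin_lock()
 * grabs A's lock anyway.  The re-check sees lock_ptr != q->lock_ptr,
 * unlocks A, and retries with B.  Since lock_ptr can never change back
 * to a value we already hold the lock for, the loop terminates with the
 * correct lock held.
 */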

static int futex_wait(unsigned long uaddr, int val, unsigned long time)
{
        DECLARE_WAITQUEUE(wait, current);
        int ret, curval;
        struct futex_q q;

        down_read(&current->mm->mmap_sem);

        ret = get_futex_key(uaddr, &q.key);
        if (unlikely(ret != 0))
                goto out_release_sem;

        queue_me(&q, -1, NULL);

        /*
         * Access the page after the futex is queued.
         * We hold the mmap semaphore, so the mapping cannot have changed
         * since we looked it up.
         */
        if (get_user(curval, (int __user *)uaddr) != 0) {
                ret = -EFAULT;
                goto out_unqueue;
        }
        if (curval != val) {
                ret = -EWOULDBLOCK;
                goto out_unqueue;
        }

        /*
         * Now the futex is queued and we have checked the data, we
         * don't want to hold mmap_sem while we sleep.
         */
        up_read(&current->mm->mmap_sem);

        /*
         * There might have been scheduling since the queue_me(), as we
         * cannot hold a spinlock across the get_user() in case it
         * faults, and we cannot just set TASK_INTERRUPTIBLE state when
         * queueing ourselves into the futex hash.  This code thus has to
         * rely on the futex_wake() code removing us from the hash when
         * it wakes us up.
         */

        /* add_wait_queue is the barrier after __set_current_state. */
        __set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&q.waiters, &wait);
        /*
         * !list_empty() is safe here without any lock.
         * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
         */
        if (likely(!list_empty(&q.list)))
                time = schedule_timeout(time);
        __set_current_state(TASK_RUNNING);

        /*
         * NOTE: we don't remove ourselves from the waitqueue because
         * we are the only user of it.
         */

        /* If we were woken (and unqueued), we succeeded, whatever. */
        if (!unqueue_me(&q))
                return 0;
        if (time == 0)
                return -ETIMEDOUT;
        /* A spurious wakeup should never happen. */
        WARN_ON(!signal_pending(current));
        return -EINTR;

out_unqueue:
        /* If we were woken (and unqueued), we succeeded, whatever. */
        if (!unqueue_me(&q))
                ret = 0;
out_release_sem:
        up_read(&current->mm->mmap_sem);
        return ret;
}
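
/*
 * The get_user() check against 'val' above is what makes the classic
 * userspace locking pattern race-free.  A minimal userspace sketch
 * (illustrative only; assumes <unistd.h>, <sys/syscall.h>,
 * <linux/futex.h> and GCC's __sync builtins):
 *
 *      static int futex_var = 0;       // 0 == unlocked, 1 == locked
 *
 *      void lock(void)
 *      {
 *              while (__sync_lock_test_and_set(&futex_var, 1) != 0)
 *                      syscall(SYS_futex, &futex_var, FUTEX_WAIT, 1, NULL);
 *      }
 *
 *      void unlock(void)
 *      {
 *              __sync_lock_release(&futex_var);
 *              syscall(SYS_futex, &futex_var, FUTEX_WAKE, 1);
 *      }
 *
 * If futex_var changes between the userspace test and the kernel's
 * get_user(), FUTEX_WAIT returns -EWOULDBLOCK and lock() simply loops,
 * so a wakeup can never be lost.
 */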

static int futex_close(struct inode *inode, struct file *filp)
{
        struct futex_q *q = filp->private_data;

        unqueue_me(q);
        kfree(q);
        return 0;
}

/* This is one-shot: once it's gone off you need a new fd */
static unsigned int futex_poll(struct file *filp,
                               struct poll_table_struct *wait)
{
        struct futex_q *q = filp->private_data;
        int ret = 0;

        poll_wait(filp, &q->waiters, wait);

        /*
         * list_empty() is safe here without any lock.
         * q->lock_ptr != 0 is not safe, because of ordering against wakeup.
         */
        if (list_empty(&q->list))
                ret = POLLIN | POLLRDNORM;

        return ret;
}

static struct file_operations futex_fops = {
        .release        = futex_close,
        .poll           = futex_poll,
};

/*
 * Signal allows caller to avoid the race which would occur if they
 * set the sigio stuff up afterwards.
 */
static int futex_fd(unsigned long uaddr, int signal)
{
        struct futex_q *q;
        struct file *filp;
        int ret, err;

        ret = -EINVAL;
        if (signal < 0 || signal > _NSIG)
                goto out;

        ret = get_unused_fd();
        if (ret < 0)
                goto out;
        filp = get_empty_filp();
        if (!filp) {
                put_unused_fd(ret);
                ret = -ENFILE;
                goto out;
        }
        filp->f_op = &futex_fops;
        filp->f_vfsmnt = mntget(futex_mnt);
        filp->f_dentry = dget(futex_mnt->mnt_root);
        filp->f_mapping = filp->f_dentry->d_inode->i_mapping;

        if (signal) {
                err = f_setown(filp, current->pid, 1);
                if (err < 0) {
                        put_unused_fd(ret);
                        put_filp(filp);
                        ret = err;
                        goto out;
                }
                filp->f_owner.signum = signal;
        }

        q = kmalloc(sizeof(*q), GFP_KERNEL);
        if (!q) {
                put_unused_fd(ret);
                put_filp(filp);
                ret = -ENOMEM;
                goto out;
        }

        down_read(&current->mm->mmap_sem);
        err = get_futex_key(uaddr, &q->key);

        if (unlikely(err != 0)) {
                up_read(&current->mm->mmap_sem);
                put_unused_fd(ret);
                put_filp(filp);
                kfree(q);
                return err;
        }

        /*
         * queue_me() must be called before releasing mmap_sem, because
         * key->shared.inode needs to be referenced while holding it.
         */
        filp->private_data = q;

        queue_me(q, ret, filp);
        up_read(&current->mm->mmap_sem);

        /* Now we map fd to filp, so userspace can access it */
        fd_install(ret, filp);
out:
        return ret;
}
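
/*
 * Usage sketch (userspace, illustrative): the fd returned by FUTEX_FD
 * can be handed to poll() or select(), and reports POLLIN once the
 * futex has been woken.  As futex_poll() notes, it is one-shot: after
 * it fires, close() it and create a new one.
 */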

long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
              unsigned long uaddr2, int val2, int val3)
{
        int ret;

        switch (op) {
        case FUTEX_WAIT:
                ret = futex_wait(uaddr, val, timeout);
                break;
        case FUTEX_WAKE:
                ret = futex_wake(uaddr, val);
                break;
        case FUTEX_FD:
                /* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */
                ret = futex_fd(uaddr, val);
                break;
        case FUTEX_REQUEUE:
                ret = futex_requeue(uaddr, uaddr2, val, val2, NULL);
                break;
        case FUTEX_CMP_REQUEUE:
                ret = futex_requeue(uaddr, uaddr2, val, val2, &val3);
                break;
        default:
                ret = -ENOSYS;
        }
        return ret;
}

asmlinkage long sys_futex(u32 __user *uaddr, int op, int val,
                          struct timespec __user *utime, u32 __user *uaddr2,
                          int val3)
{
        struct timespec t;
        unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
        int val2 = 0;

        if ((op == FUTEX_WAIT) && utime) {
                if (copy_from_user(&t, utime, sizeof(t)) != 0)
                        return -EFAULT;
                timeout = timespec_to_jiffies(&t) + 1;
        }
        /*
         * requeue parameter in 'utime' if op >= FUTEX_REQUEUE.
         */
        if (op >= FUTEX_REQUEUE)
                val2 = (int) (long) utime;

        return do_futex((unsigned long)uaddr, op, val, timeout,
                        (unsigned long)uaddr2, val2, val3);
}
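
/*
 * Note how FUTEX_REQUEUE/FUTEX_CMP_REQUEUE reuse the 'utime' argument
 * slot to carry nr_requeue.  For example (illustrative userspace call):
 *
 *      syscall(SYS_futex, uaddr1, FUTEX_CMP_REQUEUE, nr_wake,
 *              (struct timespec *)(long)nr_requeue, uaddr2, expected);
 *
 * which the cast above decodes back into val2 before do_futex().
 */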

static struct super_block *
futexfs_get_sb(struct file_system_type *fs_type,
               int flags, const char *dev_name, void *data)
{
        return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA);
}

static struct file_system_type futex_fs_type = {
        .name           = "futexfs",
        .get_sb         = futexfs_get_sb,
        .kill_sb        = kill_anon_super,
};

static int __init init(void)
{
        unsigned int i;

        register_filesystem(&futex_fs_type);
        futex_mnt = kern_mount(&futex_fs_type);

        for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
                INIT_LIST_HEAD(&futex_queues[i].chain);
                futex_queues[i].lock = SPIN_LOCK_UNLOCKED;
        }
        return 0;
}
__initcall(init);