Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
/*
 * linux/kernel/futex_compat.c
 *
 * Futex compatibility routines.
 *
 * Copyright 2006, Red Hat, Inc., Ingo Molnar
 */

#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/futex.h>
#include <linux/vs_cvirt.h>

#include <asm/uaccess.h>

/*
 * Walk curr->compat_robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void compat_exit_robust_list(struct task_struct *curr)
{
        struct compat_robust_list_head __user *head = curr->compat_robust_list;
        struct robust_list __user *entry, *pending;
        compat_uptr_t uentry, upending;
        unsigned int limit = ROBUST_LIST_LIMIT;
        compat_long_t futex_offset;

        /*
         * Fetch the list head (which was registered earlier, via
         * compat_sys_set_robust_list()):
         */
        if (get_user(uentry, &head->list.next))
                return;
        entry = compat_ptr(uentry);
        /*
         * Fetch the relative futex offset:
         */
        if (get_user(futex_offset, &head->futex_offset))
                return;
        /*
         * Fetch any possibly pending lock-add first, and handle it
         * if it exists:
         */
        if (get_user(upending, &head->list_op_pending))
                return;
        pending = compat_ptr(upending);
        if (upending)
                handle_futex_death((void *)pending + futex_offset, curr);

        while (compat_ptr(uentry) != &head->list) {
                /*
                 * A pending lock might already be on the list, so
                 * don't process it twice:
                 */
                if (entry != pending)
                        if (handle_futex_death((void *)entry + futex_offset,
                                                curr))
                                return;

                /*
                 * Fetch the next entry in the list:
                 */
                if (get_user(uentry, (compat_uptr_t *)&entry->next))
                        return;
                entry = compat_ptr(uentry);
                /*
                 * Avoid excessively long or circular lists:
                 */
                if (!--limit)
                        break;

                cond_resched();
        }
}

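/*
 * Record the calling task's compat (32-bit) robust-list head.  Only the
 * pointer is stored; the list itself lives in userspace and is walked at
 * exit time by compat_exit_robust_list() above.
 */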
asmlinkage long
compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
                           compat_size_t len)
{
        if (unlikely(len != sizeof(*head)))
                return -EINVAL;

        current->compat_robust_list = head;

        return 0;
}

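/*
 * Fetch the compat robust-list head of the task identified by @pid, or of
 * the current task if @pid is 0.  For another task the caller's euid must
 * match the target's uid or euid, or the caller must have CAP_SYS_PTRACE.
 */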
asmlinkage long
compat_sys_get_robust_list(int pid, compat_uptr_t *head_ptr,
                           compat_size_t __user *len_ptr)
{
        struct compat_robust_list_head *head;
        unsigned long ret;

        if (!pid)
                head = current->compat_robust_list;
        else {
                struct task_struct *p;

                ret = -ESRCH;
                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);
                if (!p)
                        goto err_unlock;
                ret = -EPERM;
                if ((current->euid != p->euid) && (current->euid != p->uid) &&
                                !capable(CAP_SYS_PTRACE))
                        goto err_unlock;
                head = p->compat_robust_list;
                read_unlock(&tasklist_lock);
        }

        if (put_user(sizeof(*head), len_ptr))
                return -EFAULT;
        return put_user(ptr_to_compat(head), head_ptr);

err_unlock:
        read_unlock(&tasklist_lock);

        return ret;
}

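/*
 * 32-bit entry point for the futex syscall: convert the compat timespec
 * for FUTEX_WAIT into a jiffies timeout, then hand everything off to
 * do_futex().
 */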
asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
                struct compat_timespec __user *utime, u32 __user *uaddr2,
                u32 val3)
{
        struct timespec t;
        unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
        int val2 = 0;

        if (utime && (op == FUTEX_WAIT)) {
                if (get_compat_timespec(&t, utime))
                        return -EFAULT;
                if (!timespec_valid(&t))
                        return -EINVAL;
                timeout = timespec_to_jiffies(&t) + 1;
        }
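        /*
         * For FUTEX_REQUEUE and later operations the 4th argument is not
         * a timeout but an integer count (e.g. the number of waiters to
         * requeue), so pass it on to do_futex() as val2.
         */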
        if (op >= FUTEX_REQUEUE)
                val2 = (int) (unsigned long) utime;

        return do_futex((unsigned long)uaddr, op, val, timeout,
                        (unsigned long)uaddr2, val2, val3);
}