/*
 * linux/mm/usercopy.c
 *
 * (C) Copyright 2003 Ingo Molnar
 *
 * Generic implementation of all the user-VM access functions, without
 * relying on being able to access the VM directly.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/atomic_kmap.h>

/*
 * Get kernel address of the user page and pin it.
 */
static inline struct page *pin_page(unsigned long addr, int write,
					unsigned long *pfn)
{
	struct mm_struct *mm = current->mm ? : &init_mm;
	struct page *page = NULL;
	int ret;

	/*
	 * Do a quick atomic lookup first - this is the fastpath.
	 */
retry:
	page = follow_page_pfn(mm, addr, write, pfn);
	if (likely(page != NULL)) {
		if (!PageReserved(page))
			get_page(page);
		return page;
	}
	if (*pfn)
		return NULL;
	/*
	 * No luck - bad address or need to fault in the page:
	 */

	/* Release the lock so get_user_pages can sleep */
	spin_unlock(&mm->page_table_lock);

	/*
	 * In the context of filemap_copy_from_user(), we are not allowed
	 * to sleep.  We must fail this usercopy attempt and allow
	 * filemap_copy_from_user() to recover: drop its atomic kmap and use
	 * a sleeping kmap instead.
	 */
	if (in_atomic()) {
		spin_lock(&mm->page_table_lock);
		return NULL;
	}

	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, addr, 1, write, 0, NULL, NULL);
	up_read(&mm->mmap_sem);
	spin_lock(&mm->page_table_lock);

	if (ret <= 0)
		return NULL;

	/*
	 * Go try the follow_page again.
	 */
	goto retry;
}

static inline void unpin_page(struct page *page)
{
	put_page(page);
}
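
/*
 * Illustrative only (not part of the original file): the copy loops
 * below use pin_page()/unpin_page() under mm->page_table_lock.  A
 * minimal sketch of that pattern, assuming 'mm' and 'addr' are already
 * set up by the caller:
 */
#if 0
	unsigned long pfn = 0;
	struct page *page;

	spin_lock(&mm->page_table_lock);
	page = pin_page(addr, 1 /* write */, &pfn);
	if (page || pfn) {
		/* kmap_atomic() the page (or pfn) and do the access here */
		if (page)
			unpin_page(page);
	}
	spin_unlock(&mm->page_table_lock);
#endif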

/*
 * Access another process' address space.
 * The source/target buffer must be in kernel space.
 * Do not walk the page table directly; use get_user_pages().
 * Returns the number of bytes that could not be transferred.
 */
static int rw_vm(unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm = current->mm ? : &init_mm;

	if (!len)
		return 0;

	spin_lock(&mm->page_table_lock);

	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		struct page *page = NULL;
		unsigned long pfn = 0;
		int bytes, offset;
		void *maddr;

		page = pin_page(addr, write, &pfn);
		if (!page && !pfn)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		if (page)
			maddr = kmap_atomic(page, KM_USER_COPY);
		else
			maddr = kmap_atomic_nocache_pfn(pfn, KM_USER_COPY);

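/*
 * For the transfer sizes produced by a get_user()/put_user() of a
 * char, int or long long, do a single, naturally sized load/store
 * instead of a byte-wise memcpy(); presumably this keeps such small
 * accesses single operations.  Everything else falls through to
 * memcpy().
 */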
#define HANDLE_TYPE(type) \
	case sizeof(type): *(type *)(maddr+offset) = *(type *)(buf); break;

		if (write) {
			switch (bytes) {
			HANDLE_TYPE(char);
			HANDLE_TYPE(int);
			HANDLE_TYPE(long long);
			default:
				memcpy(maddr + offset, buf, bytes);
			}
		} else {
#undef HANDLE_TYPE
#define HANDLE_TYPE(type) \
	case sizeof(type): *(type *)(buf) = *(type *)(maddr+offset); break;
			switch (bytes) {
			HANDLE_TYPE(char);
			HANDLE_TYPE(int);
			HANDLE_TYPE(long long);
			default:
				memcpy(buf, maddr + offset, bytes);
			}
#undef HANDLE_TYPE
		}
		kunmap_atomic(maddr, KM_USER_COPY);
		if (page)
			unpin_page(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	spin_unlock(&mm->page_table_lock);

	return len;
}
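
/*
 * Illustrative only (not part of the original file): rw_vm() returns
 * the number of bytes it could not transfer, so 0 means the whole
 * range was copied.  A minimal sketch, with 'uaddr' standing in for
 * some user address:
 */
#if 0
	char kbuf[16];
	int uncopied;

	uncopied = rw_vm(uaddr, kbuf, sizeof(kbuf), 0);	/* read from user */
	if (uncopied)
		;	/* only sizeof(kbuf) - uncopied leading bytes are valid */
#endif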
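/*
 * Walk a user string one page at a time.  The 'copy' argument selects
 * the mode: 0 measures the string length (strnlen()), 1 copies it into
 * the kernel buffer 'buf0' (via strncpy_count()), 2 zeroes the user
 * memory.  Returns how much of 'len' was left unprocessed, or -EFAULT
 * on a bad address.
 */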
static int str_vm(unsigned long addr, void *buf0, int len, int copy)
{
	struct mm_struct *mm = current->mm ? : &init_mm;
	struct page *page;
	void *buf = buf0;

	if (!len)
		return len;

	spin_lock(&mm->page_table_lock);

	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, offset, left, copied;
		unsigned long pfn = 0;
		char *maddr;

		page = pin_page(addr, copy == 2, &pfn);
		if (!page && !pfn) {
			spin_unlock(&mm->page_table_lock);
			return -EFAULT;
		}
		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		if (page)
			maddr = kmap_atomic(page, KM_USER_COPY);
		else
			maddr = kmap_atomic_nocache_pfn(pfn, KM_USER_COPY);
		if (copy == 2) {
			memset(maddr + offset, 0, bytes);
			copied = bytes;
			left = 0;
		} else if (copy == 1) {
			left = strncpy_count(buf, maddr + offset, bytes);
			copied = bytes - left;
		} else {
			copied = strnlen(maddr + offset, bytes);
			left = bytes - copied;
		}
		BUG_ON(bytes < 0 || copied < 0);
		kunmap_atomic(maddr, KM_USER_COPY);
		if (page)
			unpin_page(page);
		len -= copied;
		buf += copied;
		addr += copied;
		if (left)
			break;
	}
	spin_unlock(&mm->page_table_lock);

	return len;
}

/*
 * Copies memory from userspace (ptr) into kernelspace (val).
 *
 * returns # of bytes not copied.
 */
int get_user_size(unsigned int size, void *val, const void *ptr)
{
	int ret;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
		ret = __direct_copy_from_user(val, ptr, size);
	else
		ret = rw_vm((unsigned long)ptr, val, size, 0);
	if (ret)
		/*
		 * Zero the rest:
		 */
		memset(val + size - ret, 0, ret);
	return ret;
}

/*
 * Copies memory from kernelspace (val) into userspace (ptr).
 *
 * returns # of bytes not copied.
 */
int put_user_size(unsigned int size, const void *val, void *ptr)
{
	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
		return __direct_copy_to_user(ptr, val, size);
	else
		return rw_vm((unsigned long)ptr, (void *)val, size, 1);
}
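
/*
 * Illustrative only (not part of the original file): how a caller
 * might use the two helpers above.  The struct and the user pointer
 * 'uptr' are made up for the example; a return of 0 means the whole
 * object was transferred.
 */
#if 0
	struct example { int a; long long b; } kval;

	if (get_user_size(sizeof(kval), &kval, uptr))
		return -EFAULT;		/* partial/failed copy from user */

	kval.a++;

	if (put_user_size(sizeof(kval), &kval, uptr))
		return -EFAULT;		/* partial/failed copy to user */
#endif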

/*
 * Copies a NUL-terminated string from userspace (ptr) into
 * kernelspace (val), copying at most 'size' bytes.
 *
 * returns # of bytes copied, or a negative error.
 */
int copy_str_fromuser_size(unsigned int size, void *val, const void *ptr)
{
	int copied, left;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		left = strncpy_count(val, ptr, size);
		copied = size - left;
		BUG_ON(copied < 0);

		return copied;
	}
	left = str_vm((unsigned long)ptr, val, size, 1);
	if (left < 0)
		return left;
	copied = size - left;
	BUG_ON(copied < 0);

	return copied;
}

/*
 * Measures the length of a userspace string (ptr), looking at no more
 * than 'size' bytes.
 *
 * returns the length including the terminating NUL, or 0 on a bad
 * address.
 */
int strlen_fromuser_size(unsigned int size, const void *ptr)
{
	int copied, left;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		copied = strnlen(ptr, size) + 1;
		BUG_ON(copied < 0);

		return copied;
	}
	left = str_vm((unsigned long)ptr, NULL, size, 0);
	if (left < 0)
		return 0;
	copied = size - left + 1;
	BUG_ON(copied < 0);

	return copied;
}

/*
 * Clears 'size' bytes of userspace memory at ptr.
 *
 * returns # of bytes not cleared.
 */
int zero_user_size(unsigned int size, void *ptr)
{
	int left;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memset(ptr, 0, size);
		return 0;
	}
	left = str_vm((unsigned long)ptr, NULL, size, 2);
	if (left < 0)
		return size;
	return left;
}

EXPORT_SYMBOL(get_user_size);
EXPORT_SYMBOL(put_user_size);
EXPORT_SYMBOL(zero_user_size);
EXPORT_SYMBOL(copy_str_fromuser_size);
EXPORT_SYMBOL(strlen_fromuser_size);
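
/*
 * Illustrative only (not part of the original file): one way an
 * architecture's uaccess layer could be built on the helpers exported
 * above.  The macro names below are a sketch under that assumption,
 * not the actual arch code.
 */
#if 0
#define copy_from_user(to, from, n)	get_user_size(n, to, from)
#define copy_to_user(to, from, n)	put_user_size(n, from, to)
#define clear_user(to, n)		zero_user_size(n, to)
#define strnlen_user(s, n)		strlen_fromuser_size(n, s)
#endif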