/*
 * (C) Copyright 2003 Ingo Molnar
 *
 * Generic implementation of all the user-VM access functions, without
 * relying on being able to access the VM directly.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/atomic_kmap.h>

/*
 * Get kernel address of the user page and pin it.
 */
static inline struct page *pin_page(unsigned long addr, int write)
{
        struct mm_struct *mm = current->mm ? : &init_mm;
        struct page *page = NULL;
        int ret;

        /*
         * Do a quick atomic lookup first - this is the fastpath.
         */
retry:
        page = follow_page(mm, addr, write);
        if (likely(page != NULL)) {
                if (!PageReserved(page))
                        get_page(page);
                return page;
        }

        /*
         * No luck - bad address or need to fault in the page:
         */

        /* Release the lock so get_user_pages can sleep */
        spin_unlock(&mm->page_table_lock);

        /*
         * In the context of filemap_copy_from_user(), we are not allowed
         * to sleep. We must fail this usercopy attempt and allow
         * filemap_copy_from_user() to recover: drop its atomic kmap and use
         * a sleeping kmap instead.
         */
        if (in_atomic()) {
                spin_lock(&mm->page_table_lock);
                return NULL;
        }

        down_read(&mm->mmap_sem);
        ret = get_user_pages(current, mm, addr, 1, write, 0, NULL, NULL);
        up_read(&mm->mmap_sem);
        spin_lock(&mm->page_table_lock);

        if (ret <= 0)
                return NULL;

        /*
         * Go try the follow_page again.
         */
        goto retry;
}

static inline void unpin_page(struct page *page)
{
        put_page(page);
}
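
/*
 * Illustrative sketch, not part of the original file: the pattern the
 * two helpers above are meant for.  pin_page() must be called with
 * mm->page_table_lock held; the pinned page is then mapped via an
 * atomic kmap, accessed, unmapped and unpinned.  peek_user_byte() is a
 * hypothetical name used only for this example.
 */
static inline int peek_user_byte(unsigned long addr, unsigned char *val)
{
        struct mm_struct *mm = current->mm ? : &init_mm;
        struct page *page;
        char *maddr;

        spin_lock(&mm->page_table_lock);
        page = pin_page(addr, 0);       /* 0: read-only access */
        if (!page) {
                spin_unlock(&mm->page_table_lock);
                return -EFAULT;
        }
        maddr = kmap_atomic(page, KM_USER_COPY);
        *val = maddr[addr & (PAGE_SIZE-1)];
        kunmap_atomic(maddr, KM_USER_COPY);
        unpin_page(page);
        spin_unlock(&mm->page_table_lock);
        return 0;
}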

/*
 * Access another process' address space.
 * The source/target buffer must be in kernel space.
 * Do not walk the page tables directly; use get_user_pages.
 */
static int rw_vm(unsigned long addr, void *buf, int len, int write)
{
        struct mm_struct *mm = current->mm ? : &init_mm;

        if (!len)
                return 0;

        spin_lock(&mm->page_table_lock);

        /* ignore errors, just check how much was successfully transferred */
        while (len) {
                struct page *page = NULL;
                int bytes, offset;
                void *maddr;

                page = pin_page(addr, write);
                if (!page)
                        break;

                bytes = len;
                offset = addr & (PAGE_SIZE-1);
                if (bytes > PAGE_SIZE-offset)
                        bytes = PAGE_SIZE-offset;

                maddr = kmap_atomic(page, KM_USER_COPY);
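
                /*
                 * HANDLE_TYPE() below expands to a switch case that does
                 * a single, naturally-sized load/store for 1-, 4- and
                 * 8-byte transfers instead of calling memcpy() -
                 * presumably so that aligned word accesses (e.g. ptrace
                 * pokes) happen in one instruction.
                 */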
#define HANDLE_TYPE(type) \
        case sizeof(type): *(type *)(maddr+offset) = *(type *)(buf); break;

                if (write) {
                        /* write to the user page: */
                        switch (bytes) {
                        HANDLE_TYPE(char);
                        HANDLE_TYPE(int);
                        HANDLE_TYPE(long long);
                        default:
                                memcpy(maddr + offset, buf, bytes);
                        }
                } else {
#undef HANDLE_TYPE
#define HANDLE_TYPE(type) \
        case sizeof(type): *(type *)(buf) = *(type *)(maddr+offset); break;
                        /* read from the user page: */
                        switch (bytes) {
                        HANDLE_TYPE(char);
                        HANDLE_TYPE(int);
                        HANDLE_TYPE(long long);
                        default:
                                memcpy(buf, maddr + offset, bytes);
                        }
#undef HANDLE_TYPE
                }
                kunmap_atomic(maddr, KM_USER_COPY);
                unpin_page(page);
                len -= bytes;
                buf += bytes;
                addr += bytes;
        }
        spin_unlock(&mm->page_table_lock);

        return len;
}
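
/*
 * Walk a user string one pinned page at a time.  The 'copy' argument
 * selects the operation:
 *
 *   copy == 0: only measure the string (strnlen)
 *   copy == 1: copy it into buf0, stopping at the terminating NUL
 *   copy == 2: zero the user memory (requires write access)
 *
 * Returns the number of bytes left unprocessed, or -EFAULT if a page
 * could not be pinned.
 */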
static int str_vm(unsigned long addr, void *buf0, int len, int copy)
{
        struct mm_struct *mm = current->mm ? : &init_mm;
        struct page *page;
        void *buf = buf0;

        if (!len)
                return len;

        spin_lock(&mm->page_table_lock);

        /* ignore errors, just check how much was successfully transferred */
        while (len) {
                int bytes, offset, left, copied;
                char *maddr;

                page = pin_page(addr, copy == 2);
                if (!page) {
                        spin_unlock(&mm->page_table_lock);
                        return -EFAULT;
                }

                bytes = len;
                offset = addr & (PAGE_SIZE-1);
                if (bytes > PAGE_SIZE-offset)
                        bytes = PAGE_SIZE-offset;

                maddr = kmap_atomic(page, KM_USER_COPY);
                if (copy == 2) {
                        /* zero the user page: */
                        memset(maddr + offset, 0, bytes);
                        copied = bytes;
                        left = 0;
                } else if (copy == 1) {
                        /* copy the string, stopping at a NUL: */
                        left = strncpy_count(buf, maddr + offset, bytes);
                        copied = bytes - left;
                } else {
                        /* just measure the string: */
                        copied = strnlen(maddr + offset, bytes);
                        left = bytes - copied;
                }
                BUG_ON(bytes < 0 || copied < 0);
                kunmap_atomic(maddr, KM_USER_COPY);
                unpin_page(page);
                len -= copied;
                buf += copied;
                addr += copied;

                /* a NUL was hit before the end of this page - done: */
                if (left)
                        break;
        }
        spin_unlock(&mm->page_table_lock);

        return len;
}

/*
 * Copies memory from userspace (ptr) into kernelspace (val).
 *
 * Returns the number of bytes NOT copied.
 */
int get_user_size(unsigned int size, void *val, const void *ptr)
{
        int ret;

        if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
                ret = __direct_copy_from_user(val, ptr, size);
        else
                ret = rw_vm((unsigned long)ptr, val, size, 0);
        if (ret)
                /*
                 * Zero the tail that could not be copied:
                 */
                memset(val + size - ret, 0, ret);
        return ret;
}

/*
 * Copies memory from kernelspace (val) into userspace (ptr).
 *
 * Returns the number of bytes NOT copied.
 */
int put_user_size(unsigned int size, const void *val, void *ptr)
{
        if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
                return __direct_copy_to_user(ptr, val, size);
        else
                return rw_vm((unsigned long)ptr, (void *)val, size, 1);
}
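
/*
 * Illustrative sketch, not part of the original file: how a caller
 * might wrap get_user_size()/put_user_size(), which return the number
 * of bytes that could NOT be transferred.  example_fetch_int() is a
 * hypothetical name used only for this example.
 */
static inline int example_fetch_int(int *val, const void *uptr)
{
        /* 0 on success, -EFAULT if any part of the word was unreadable */
        if (get_user_size(sizeof(*val), val, uptr))
                return -EFAULT;
        return 0;
}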

/*
 * Copies a NUL-terminated string from userspace (ptr) into
 * kernelspace (val).
 *
 * Returns the number of bytes copied, or a negative error value on a
 * bad address.
 */
int copy_str_fromuser_size(unsigned int size, void *val, const void *ptr)
{
        int copied, left;

        if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
                left = strncpy_count(val, ptr, size);
                copied = size - left;
                BUG_ON(copied < 0);

                return copied;
        }
        left = str_vm((unsigned long)ptr, val, size, 1);
        if (left < 0)
                return left;
        copied = size - left;
        BUG_ON(copied < 0);

        return copied;
}

/*
 * Returns the length of the string at userspace address ptr, including
 * the terminating NUL, limited to size bytes; 0 on a bad address.
 */
int strlen_fromuser_size(unsigned int size, const void *ptr)
{
        int copied, left;

        if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
                copied = strnlen(ptr, size) + 1;
                BUG_ON(copied < 0);

                return copied;
        }
        left = str_vm((unsigned long)ptr, NULL, size, 0);
        if (left < 0)
                return 0;
        copied = size - left + 1;
        BUG_ON(copied < 0);

        return copied;
}

/*
 * Zeroes size bytes at userspace address ptr.
 *
 * Returns the number of bytes NOT zeroed.
 */
int zero_user_size(unsigned int size, void *ptr)
{
        int left;

        if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
                memset(ptr, 0, size);
                return 0;
        }
        left = str_vm((unsigned long)ptr, NULL, size, 2);
        if (left < 0)
                return size;
        return left;
}
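
/*
 * Illustrative sketch, not part of the original file: unlike strnlen(),
 * strlen_fromuser_size() counts the terminating NUL (note the '+ 1'
 * above) and returns 0 on a bad address.  example_user_str_ok() is a
 * hypothetical name used only for this example.
 */
static inline int example_user_str_ok(const char *ustr, unsigned int max)
{
        int len = strlen_fromuser_size(max, ustr);

        /* len == 0: fault; len > max: no NUL within 'max' bytes */
        return len > 0 && len <= max;
}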

EXPORT_SYMBOL(get_user_size);
EXPORT_SYMBOL(put_user_size);
EXPORT_SYMBOL(zero_user_size);
EXPORT_SYMBOL(copy_str_fromuser_size);
EXPORT_SYMBOL(strlen_fromuser_size);