/*
 * (C) Copyright 2003 Ingo Molnar
 *
 * Generic implementation of all the user-VM access functions, without
 * relying on being able to access the VM directly.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/atomic_kmap.h>
/*
 * Get kernel address of the user page and pin it.
 */
static inline struct page *pin_page(unsigned long addr, int write,
					unsigned long *pfn)
{
	struct mm_struct *mm = current->mm ? : &init_mm;
	struct page *page = NULL;
	int ret;

	/*
	 * Do a quick atomic lookup first - this is the fastpath.
	 */
repeat:
	page = follow_page_pfn(mm, addr, write, pfn);
	if (likely(page != NULL)) {
		if (!PageReserved(page))
			get_page(page);
		return page;
	}
	if (*pfn)
		return NULL;

	/*
	 * No luck - bad address or need to fault in the page:
	 */
	/* Release the lock so get_user_pages can sleep */
	spin_unlock(&mm->page_table_lock);
	/*
	 * In the context of filemap_copy_from_user(), we are not allowed
	 * to sleep. We must fail this usercopy attempt and allow
	 * filemap_copy_from_user() to recover: drop its atomic kmap and use
	 * a sleeping kmap instead.
	 */
	if (in_atomic()) {
		spin_lock(&mm->page_table_lock);
		return NULL;
	}
	down_read(&mm->mmap_sem);
	ret = get_user_pages(current, mm, addr, 1, write, 0, NULL, NULL);
	up_read(&mm->mmap_sem);
	spin_lock(&mm->page_table_lock);

	if (ret <= 0)
		return NULL;

	/*
	 * Go try the follow_page again.
	 */
	goto repeat;
}
static inline void unpin_page(struct page *page)
{
	put_page(page);
}
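
/*
 * Locking note: pin_page() is entered and left with
 * mm->page_table_lock held; it only drops the lock temporarily
 * so that get_user_pages() may sleep. A non-NULL page returned
 * by pin_page() must later be released with unpin_page().
 */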
/*
 * Access another process's address space.
 * The source/target buffer must be in kernel space.
 * Do not walk the page tables directly; use get_user_pages.
 */
static int rw_vm(unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm = current->mm ? : &init_mm;

	if (!len)
		return 0;

	spin_lock(&mm->page_table_lock);

	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		struct page *page = NULL;
		unsigned long pfn = 0;
		int bytes, offset;
		void *maddr;

		page = pin_page(addr, write, &pfn);
		if (!page && !pfn)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;
		if (page)
			maddr = kmap_atomic(page, KM_USER_COPY);
		else
			maddr = kmap_atomic_nocache_pfn(pfn, KM_USER_COPY);
/*
 * Handle the common small sizes with a single, correctly-sized
 * access; fall back to memcpy for everything else.
 */
#define HANDLE_TYPE(type) \
	case sizeof(type): *(type *)(maddr+offset) = *(type *)(buf); break;

		if (write) {
			switch (bytes) {
			HANDLE_TYPE(char);
			HANDLE_TYPE(int);
			HANDLE_TYPE(long long);
			default:
				memcpy(maddr + offset, buf, bytes);
			}
		} else {
#undef HANDLE_TYPE
#define HANDLE_TYPE(type) \
	case sizeof(type): *(type *)(buf) = *(type *)(maddr+offset); break;

			switch (bytes) {
			HANDLE_TYPE(char);
			HANDLE_TYPE(int);
			HANDLE_TYPE(long long);
			default:
				memcpy(buf, maddr + offset, bytes);
			}
#undef HANDLE_TYPE
		}
		kunmap_atomic(maddr, KM_USER_COPY);
		if (page)
			unpin_page(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	spin_unlock(&mm->page_table_lock);

	return len;
}
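
/*
 * Usage sketch (illustrative only): rw_vm() returns the number
 * of bytes that could NOT be transferred, so zero means full
 * success. Copying from a hypothetical user pointer 'uptr':
 *
 *	char kbuf[64];
 *
 *	if (rw_vm((unsigned long)uptr, kbuf, sizeof(kbuf), 0))
 *		return -EFAULT;
 */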
/*
 * String variant of rw_vm(). 'copy' selects the operation:
 * 2 zeroes the user range, 1 copies a string into buf0, 0 only
 * measures the string's length. Returns the number of bytes not
 * processed, or -EFAULT on a bad address.
 */
static int str_vm(unsigned long addr, void *buf0, int len, int copy)
{
	struct mm_struct *mm = current->mm ? : &init_mm;
	struct page *page;
	void *buf = buf0;

	if (!len)
		return len;

	spin_lock(&mm->page_table_lock);

	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, offset, left, copied;
		unsigned long pfn = 0;
		char *maddr;

		page = pin_page(addr, copy == 2, &pfn);
		if (!page && !pfn) {
			spin_unlock(&mm->page_table_lock);
			return -EFAULT;
		}
		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;
		if (page)
			maddr = kmap_atomic(page, KM_USER_COPY);
		else
			maddr = kmap_atomic_nocache_pfn(pfn, KM_USER_COPY);
		if (copy == 2) {
			memset(maddr + offset, 0, bytes);
			copied = bytes;
			left = 0;
		} else if (copy == 1) {
			left = strncpy_count(buf, maddr + offset, bytes);
			copied = bytes - left;
		} else {
			copied = strnlen(maddr + offset, bytes);
			left = bytes - copied;
		}
		BUG_ON(bytes < 0 || copied < 0);
		kunmap_atomic(maddr, KM_USER_COPY);
		if (page)
			unpin_page(page);
		len -= copied;
		buf += copied;
		addr += copied;
		if (left)
			break;
	}
	spin_unlock(&mm->page_table_lock);

	return len;
}
/*
 * Copies memory from userspace (ptr) into kernelspace (val).
 *
 * returns # of bytes not copied.
 */
int get_user_size(unsigned int size, void *val, const void *ptr)
{
	int ret;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
		ret = __direct_copy_from_user(val, ptr, size);
	else
		ret = rw_vm((unsigned long)ptr, val, size, 0);
	if (ret)
		/*
		 * Zero the rest:
		 */
		memset(val + size - ret, 0, ret);
	return ret;
}
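
/*
 * Usage sketch (illustrative, not from this file): reading a
 * user-supplied structure into kernel memory, where 'uptr' is a
 * hypothetical user-space pointer:
 *
 *	struct foo kfoo;
 *
 *	if (get_user_size(sizeof(kfoo), &kfoo, uptr))
 *		return -EFAULT;
 *
 * On a short copy, the uncopied tail of kfoo has already been
 * zeroed above.
 */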
/*
 * Copies memory from kernelspace (val) into userspace (ptr).
 *
 * returns # of bytes not copied.
 */
int put_user_size(unsigned int size, const void *val, void *ptr)
{
	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
		return __direct_copy_to_user(ptr, val, size);
	return rw_vm((unsigned long)ptr, (void *)val, size, 1);
}
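
/*
 * Usage sketch (illustrative): the mirror-image operation,
 * publishing a kernel structure to the same hypothetical user
 * pointer:
 *
 *	if (put_user_size(sizeof(kfoo), &kfoo, uptr))
 *		return -EFAULT;
 */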
int copy_str_fromuser_size(unsigned int size, void *val, const void *ptr)
{
	int copied, left;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		left = strncpy_count(val, ptr, size);
		copied = size - left;
		BUG_ON(copied < 0);

		return copied;
	}
	left = str_vm((unsigned long)ptr, val, size, 1);
	if (left < 0)
		return left;
	copied = size - left;
	BUG_ON(copied < 0);

	return copied;
}
int strlen_fromuser_size(unsigned int size, const void *ptr)
{
	int copied, left;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		copied = strnlen(ptr, size) + 1;
		BUG_ON(copied < 0);

		return copied;
	}
	left = str_vm((unsigned long)ptr, NULL, size, 0);
	if (left < 0)
		return 0;
	copied = size - left + 1;
	BUG_ON(copied < 0);

	return copied;
}
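
/*
 * Note: like the usual strlen_user() convention, the value
 * returned above counts the terminating NUL (length + 1), and a
 * return of 0 signals a faulting address.
 */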
int zero_user_size(unsigned int size, void *ptr)
{
	int left;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memset(ptr, 0, size);
		return 0;
	}
	left = str_vm((unsigned long)ptr, NULL, size, 2);
	if (left < 0)
		return size;
	return left;
}
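
/*
 * All five entry points above share the same pattern: when the
 * current segment is KERNEL_DS, the arch's direct-copy primitives
 * are used verbatim; otherwise the access goes through the
 * pinned-page machinery, so user memory never has to be directly
 * addressable from kernel space.
 */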
EXPORT_SYMBOL(get_user_size);
EXPORT_SYMBOL(put_user_size);
EXPORT_SYMBOL(zero_user_size);
EXPORT_SYMBOL(copy_str_fromuser_size);
EXPORT_SYMBOL(strlen_fromuser_size);