/*
 *  include/asm-s390/uaccess.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/errno.h>

#define VERIFY_READ     0
#define VERIFY_WRITE    1


/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })


#define KERNEL_DS       MAKE_MM_SEG(0)
#define USER_DS         MAKE_MM_SEG(1)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.mm_segment)

#ifdef __s390x__
#define set_fs(x) \
({                                                                      \
        unsigned long __pto;                                            \
        current->thread.mm_segment = (x);                               \
        __pto = current->thread.mm_segment.ar4 ?                        \
                S390_lowcore.user_asce : S390_lowcore.kernel_asce;      \
        asm volatile ("lctlg 7,7,%0" : : "m" (__pto) );                 \
})
#else
#define set_fs(x) \
({                                                                      \
        unsigned long __pto;                                            \
        current->thread.mm_segment = (x);                               \
        __pto = current->thread.mm_segment.ar4 ?                        \
                S390_lowcore.user_asce : S390_lowcore.kernel_asce;      \
        asm volatile ("lctl  7,7,%0" : : "m" (__pto) );                 \
})
#endif
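
/*
 * Illustrative sketch (not part of the original header): the classic
 * pattern for temporarily widening the address-space limit so that the
 * uaccess routines below accept kernel pointers.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	...call code that performs copy_to_user()/copy_from_user() on a
 *	   kernel buffer...
 *	set_fs(old_fs);
 */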

#define segment_eq(a,b) ((a).ar4 == (b).ar4)


#define __access_ok(addr,size) (1)

#define access_ok(type,addr,size) __access_ok(addr,size)

extern inline int verify_area(int type, const void * addr, unsigned long size)
{
        return access_ok(type,addr,size)?0:-EFAULT;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};
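
/*
 * Illustrative sketch only: on a fault inside one of the 0:/1: ranges
 * marked below, the page-fault handler looks the faulting address up in
 * __ex_table and resumes execution at the paired fixup address, roughly:
 *
 *	const struct exception_table_entry *fix;
 *
 *	fix = search_exception_tables(regs->psw.addr);
 *	if (fix)
 *		regs->psw.addr = fix->fixup;
 *
 * The exact handler code lives in the architecture's fault handling,
 * not in this header.
 */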

#ifndef __s390x__
#define __uaccess_fixup \
        ".section .fixup,\"ax\"\n"      \
        "2: lhi    %0,%4\n"             \
        "   bras   1,3f\n"              \
        "   .long  1b\n"                \
        "3: l      1,0(1)\n"            \
        "   br     1\n"                 \
        ".previous\n"                   \
        ".section __ex_table,\"a\"\n"   \
        "   .align 4\n"                 \
        "   .long  0b,2b\n"             \
        ".previous"
#define __uaccess_clobber "cc", "1"
#else /* __s390x__ */
#define __uaccess_fixup \
        ".section .fixup,\"ax\"\n"      \
        "2: lghi   %0,%4\n"             \
        "   jg     1b\n"                \
        ".previous\n"                   \
        ".section __ex_table,\"a\"\n"   \
        "   .align 8\n"                 \
        "   .quad  0b,2b\n"             \
        ".previous"
#define __uaccess_clobber "cc"
#endif /* __s390x__ */

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#if __GNUC__ > 2
#define __put_user_asm(x, ptr, err) \
({                                                              \
        err = 0;                                                \
        asm volatile(                                           \
                "0: mvcs  0(%1,%2),%3,%0\n"                     \
                "1:\n"                                          \
                __uaccess_fixup                                 \
                : "+&d" (err)                                   \
                : "d" (sizeof(*(ptr))), "a" (ptr), "Q" (x),     \
                  "K" (-EFAULT)                                 \
                : __uaccess_clobber );                          \
})
#else
#define __put_user_asm(x, ptr, err) \
({                                                              \
        err = 0;                                                \
        asm volatile(                                           \
                "0: mvcs  0(%1,%2),0(%3),%0\n"                  \
                "1:\n"                                          \
                __uaccess_fixup                                 \
                : "+&d" (err)                                   \
                : "d" (sizeof(*(ptr))), "a" (ptr), "a" (&(x)),  \
                  "K" (-EFAULT), "m" (x)                        \
                : __uaccess_clobber );                          \
})
#endif

#define __put_user(x, ptr) \
({                                                              \
        __typeof__(*(ptr)) __x = (x);                           \
        int __pu_err;                                           \
        switch (sizeof (*(ptr))) {                              \
        case 1:                                                 \
        case 2:                                                 \
        case 4:                                                 \
        case 8:                                                 \
                __put_user_asm(__x, ptr, __pu_err);             \
                break;                                          \
        default:                                                \
                __pu_err = __put_user_bad();                    \
                break;                                          \
        }                                                       \
        __pu_err;                                               \
})

#define put_user(x, ptr)                                        \
({                                                              \
        might_sleep();                                          \
        __put_user(x, ptr);                                     \
})


extern int __put_user_bad(void);

#if __GNUC__ > 2
#define __get_user_asm(x, ptr, err) \
({                                                              \
        err = 0;                                                \
        asm volatile (                                          \
                "0: mvcp  %O1(%2,%R1),0(%3),%0\n"               \
                "1:\n"                                          \
                __uaccess_fixup                                 \
                : "+&d" (err), "=Q" (x)                         \
                : "d" (sizeof(*(ptr))), "a" (ptr),              \
                  "K" (-EFAULT)                                 \
                : __uaccess_clobber );                          \
})
#else
#define __get_user_asm(x, ptr, err) \
({                                                              \
        err = 0;                                                \
        asm volatile (                                          \
                "0: mvcp  0(%2,%5),0(%3),%0\n"                  \
                "1:\n"                                          \
                __uaccess_fixup                                 \
                : "+&d" (err), "=m" (x)                         \
                : "d" (sizeof(*(ptr))), "a" (ptr),              \
                  "K" (-EFAULT), "a" (&(x))                     \
                : __uaccess_clobber );                          \
})
#endif

#define __get_user(x, ptr)                                      \
({                                                              \
        __typeof__(*(ptr)) __x;                                 \
        int __gu_err;                                           \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
        case 2:                                                 \
        case 4:                                                 \
        case 8:                                                 \
                __get_user_asm(__x, ptr, __gu_err);             \
                break;                                          \
        default:                                                \
                __x = 0;                                        \
                __gu_err = __get_user_bad();                    \
                break;                                          \
        }                                                       \
        (x) = __x;                                              \
        __gu_err;                                               \
})

#define get_user(x, ptr)                                        \
({                                                              \
        might_sleep();                                          \
        __get_user(x, ptr);                                     \
})

extern int __get_user_bad(void);
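
/*
 * Minimal usage sketch (hypothetical code, for illustration only):
 *
 *	int __user *uptr;	pointer handed in from user space
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *
 * Both macros evaluate to 0 on success and -EFAULT if the access faults.
 */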

extern long __copy_to_user_asm(const void *from, long n, void *to);

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        return __copy_to_user_asm(from, n, to);
}

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static inline unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        might_sleep();
        if (access_ok(VERIFY_WRITE, to, n))
                n = __copy_to_user(to, from, n);
        return n;
}
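
/*
 * Usage sketch (hypothetical): copying a kernel structure out to a
 * user-supplied buffer, e.g. from an ioctl handler.
 *
 *	struct foo kbuf;
 *	struct foo __user *ubuf;	supplied by user space
 *
 *	if (copy_to_user(ubuf, &kbuf, sizeof(kbuf)))
 *		return -EFAULT;
 *
 * A non-zero return value means that many trailing bytes were not
 * copied; callers usually translate that into -EFAULT.
 */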

extern long __copy_from_user_asm(void *to, long n, const void *from);

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        return __copy_from_user_asm(to, n, from);
}

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        might_sleep();
        if (access_ok(VERIFY_READ, from, n))
                n = __copy_from_user(to, from, n);
        else
                memset(to, 0, n);
        return n;
}
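
/*
 * Usage sketch (hypothetical): reading a structure passed in from user
 * space before acting on it.
 *
 *	struct foo kbuf;
 *	const struct foo __user *ubuf;	supplied by user space
 *
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *
 * On a partial copy the destination is zero-padded to the requested
 * size, as documented above.
 */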

extern long __copy_in_user_asm(const void *from, long n, void *to);

static inline unsigned long
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        return __copy_in_user_asm(from, n, to);
}

static inline unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
        might_sleep();
        if (__access_ok(from,n) && __access_ok(to,n))
                n = __copy_in_user_asm(from, n, to);
        return n;
}

/*
 * Copy a null terminated string from userspace.
 */
extern long __strncpy_from_user_asm(char *dst, const char *src, long count);

static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
        long res = -EFAULT;
        might_sleep();
        if (access_ok(VERIFY_READ, src, 1))
                res = __strncpy_from_user_asm(dst, src, count);
        return res;
}
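
/*
 * Usage sketch (hypothetical): fetching a short, NUL-terminated name
 * from user space into a fixed-size kernel buffer.  "uname" stands for
 * a user-space pointer handed in by the caller.
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;		faulted: -EFAULT
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	no NUL within the buffer
 */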


extern long __strnlen_user_asm(const char *src, long count);

static inline unsigned long
strnlen_user(const char * src, unsigned long n)
{
        might_sleep();
        return __strnlen_user_asm(src, n);
}

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)

/*
 * Zero Userspace
 */

extern long __clear_user_asm(void *to, long n);

static inline unsigned long
__clear_user(void *to, unsigned long n)
{
        return __clear_user_asm(to, n);
}

static inline unsigned long
clear_user(void *to, unsigned long n)
{
        might_sleep();
        if (access_ok(VERIFY_WRITE, to, n))
                n = __clear_user_asm(to, n);
        return n;
}
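
/*
 * Usage sketch (hypothetical): zero-filling the tail of a user buffer,
 * e.g. after copying out fewer bytes than the caller asked for.
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 *
 * clear_user() returns the number of bytes that could not be cleared,
 * zero on success.
 */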

#endif /* __S390_UACCESS_H */