/*
 *  include/asm-s390/uaccess.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
11 #ifndef __S390_UACCESS_H
12 #define __S390_UACCESS_H
15 * User space memory access functions
17 #include <linux/sched.h>
20 #define VERIFY_WRITE 1
24 * The fs value determines whether argument validity checking should be
25 * performed or not. If get_fs() == USER_DS, checking is performed, with
26 * get_fs() == KERNEL_DS, checking is bypassed.
28 * For historical reasons, these macros are grossly misnamed.
31 #define MAKE_MM_SEG(a) ((mm_segment_t) { (a) })
34 #define KERNEL_DS MAKE_MM_SEG(0)
35 #define USER_DS MAKE_MM_SEG(1)
37 #define get_ds() (KERNEL_DS)
38 #define get_fs() (current->addr_limit)
39 #define set_fs(x) ({asm volatile("sar 4,%0"::"a" ((x).ar4)); \
40 current->addr_limit = (x);})
42 #define segment_eq(a,b) ((a).ar4 == (b).ar4)
45 #define __access_ok(addr,size) (1)
47 #define access_ok(type,addr,size) __access_ok(addr,size)
49 extern inline int verify_area(int type, const void * addr, unsigned long size)
51 return access_ok(type,addr,size)?0:-EFAULT;
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */
/*
 * One entry in the kernel exception table: the address of the faulting
 * instruction and the address of the fixup code to branch to.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
81 extern inline int __put_user_asm_8(__u64 x, void *ptr)
85 __asm__ __volatile__ ( " sr %1,%1\n"
89 "0: mvc 0(8,4),0(2)\n"
92 ".section .fixup,\"ax\"\n"
100 ".section __ex_table,\"a\"\n"
104 : "=m" (*((__u32*) ptr)), "=&d" (err)
105 : "m" (x), "K" (-EFAULT)
110 extern inline int __put_user_asm_4(__u32 x, void *ptr)
114 __asm__ __volatile__ ( " sr %1,%1\n"
120 ".section .fixup,\"ax\"\n"
128 ".section __ex_table,\"a\"\n"
132 : "=m" (*((__u32*) ptr)) , "=&d" (err)
133 : "d" (x), "K" (-EFAULT)
138 extern inline int __put_user_asm_2(__u16 x, void *ptr)
142 __asm__ __volatile__ ( " sr %1,%1\n"
148 ".section .fixup,\"ax\"\n"
156 ".section __ex_table,\"a\"\n"
160 : "=m" (*((__u16*) ptr)) , "=&d" (err)
161 : "d" (x), "K" (-EFAULT)
166 extern inline int __put_user_asm_1(__u8 x, void *ptr)
170 __asm__ __volatile__ ( " sr %1,%1\n"
176 ".section .fixup,\"ax\"\n"
184 ".section __ex_table,\"a\"\n"
188 : "=m" (*((__u8*) ptr)) , "=&d" (err)
189 : "d" (x), "K" (-EFAULT)
/*
 * (u8)(u32) ... autsch, but that is the only way we can suppress the
 * warnings when compiling binfmt_elf.c
 */
/*
 * __put_user: size-dispatched store to user space.  Evaluates to 0 on
 * success or -EFAULT on fault; unsupported sizes fall through to the
 * deliberately undefined __put_user_bad() to force a link error.
 *
 * NOTE(review): statement-expression wrapper, case labels and breaks
 * were reconstructed from a garbled copy -- verify against the tree.
 */
#define __put_user(x, ptr)                                      \
({                                                              \
	__typeof__(*(ptr)) *__pu_addr = (ptr);                  \
	__typeof__(*(ptr)) __x = (x);                           \
	int __pu_err;                                           \
	switch (sizeof (*(ptr))) {                              \
	case 1:                                                 \
		__pu_err = __put_user_asm_1((__u8)(__u32) __x,  \
					    __pu_addr);         \
		break;                                          \
	case 2:                                                 \
		__pu_err = __put_user_asm_2((__u16)(__u32) __x, \
					    __pu_addr);         \
		break;                                          \
	case 4:                                                 \
		__pu_err = __put_user_asm_4((__u32) __x,        \
					    __pu_addr);         \
		break;                                          \
	case 8:                                                 \
		__pu_err = __put_user_asm_8((__u64) __x,        \
					    __pu_addr);         \
		break;                                          \
	default:                                                \
		__pu_err = __put_user_bad();                    \
		break;                                          \
	}                                                       \
	__pu_err;                                               \
})

/* No separate checking variant: __access_ok() is constant 1 here. */
#define put_user(x, ptr)        __put_user(x, ptr)

extern int __put_user_bad(void);
/*
 * Fetch a 64-bit value from user space into x; err becomes 0 on
 * success or -EFAULT on fault.
 *
 * NOTE(review): prologue and fixup reconstructed from a garbled copy
 * (line counts match the fused original numbering) -- verify.
 */
#define __get_user_asm_8(x, ptr, err)                                   \
({                                                                      \
	__asm__ __volatile__ ( "   sr    %1,%1\n"   /* err = 0 */       \
			       "   la    2,%0\n"    /* kernel dest */   \
			       "   la    4,%2\n"    /* user source */   \
			       "   sacf  512\n"                         \
			       "0: mvc   0(8,2),0(4)\n"                 \
			       "   sacf  0\n"                           \
			       "1:\n"                                   \
			       ".section .fixup,\"ax\"\n"               \
			       "2: sacf  0\n"                           \
			       "   lhi   %1,%h3\n"  /* -EFAULT */       \
			       "   bras  4,3f\n"                        \
			       "   .long 1b\n"                          \
			       "3: l     4,0(4)\n"                      \
			       "   br    4\n"                           \
			       ".previous\n"                            \
			       ".section __ex_table,\"a\"\n"            \
			       "   .align 4\n"                          \
			       "   .long  0b,2b\n"                      \
			       ".previous"                              \
			       : "=m" (x) , "=&d" (err)                 \
			       : "m" (*(const __u64*)(ptr)),"K" (-EFAULT) \
			       : "cc", "2", "4" );                      \
})
/*
 * Fetch a 32-bit value from user space into x; err becomes 0 on
 * success or -EFAULT on fault.
 *
 * NOTE(review): prologue and fixup reconstructed from a garbled copy.
 */
#define __get_user_asm_4(x, ptr, err)                                   \
({                                                                      \
	__asm__ __volatile__ ( "   sr    %1,%1\n"   /* err = 0 */       \
			       "   la    4,%2\n"    /* user source */   \
			       "   sacf  512\n"                         \
			       "0: l     %0,0(4)\n" /* load word */     \
			       "   sacf  0\n"                           \
			       "1:\n"                                   \
			       ".section .fixup,\"ax\"\n"               \
			       "2: sacf  0\n"                           \
			       "   lhi   %1,%h3\n"  /* -EFAULT */       \
			       "   bras  4,3f\n"                        \
			       "   .long 1b\n"                          \
			       "3: l     4,0(4)\n"                      \
			       "   br    4\n"                           \
			       ".previous\n"                            \
			       ".section __ex_table,\"a\"\n"            \
			       "   .align 4\n"                          \
			       "   .long  0b,2b\n"                      \
			       ".previous"                              \
			       : "=d" (x) , "=&d" (err)                 \
			       : "m" (*(const __u32*)(ptr)),"K" (-EFAULT) \
			       : "cc", "4" );                           \
})
/*
 * Fetch a 16-bit value from user space into x; err becomes 0 on
 * success or -EFAULT on fault.
 *
 * NOTE(review): prologue and fixup reconstructed from a garbled copy.
 */
#define __get_user_asm_2(x, ptr, err)                                   \
({                                                                      \
	__asm__ __volatile__ ( "   sr    %1,%1\n"   /* err = 0 */       \
			       "   la    4,%2\n"    /* user source */   \
			       "   sacf  512\n"                         \
			       "0: lh    %0,0(4)\n" /* load halfword */ \
			       "   sacf  0\n"                           \
			       "1:\n"                                   \
			       ".section .fixup,\"ax\"\n"               \
			       "2: sacf  0\n"                           \
			       "   lhi   %1,%h3\n"  /* -EFAULT */       \
			       "   bras  4,3f\n"                        \
			       "   .long 1b\n"                          \
			       "3: l     4,0(4)\n"                      \
			       "   br    4\n"                           \
			       ".previous\n"                            \
			       ".section __ex_table,\"a\"\n"            \
			       "   .align 4\n"                          \
			       "   .long  0b,2b\n"                      \
			       ".previous"                              \
			       : "=d" (x) , "=&d" (err)                 \
			       : "m" (*(const __u16*)(ptr)),"K" (-EFAULT) \
			       : "cc", "4" );                           \
})
/*
 * Fetch an 8-bit value from user space into x; err becomes 0 on
 * success or -EFAULT on fault.  %0 is cleared first because "ic" only
 * inserts the low byte.
 *
 * NOTE(review): prologue and fixup reconstructed from a garbled copy.
 */
#define __get_user_asm_1(x, ptr, err)                                   \
({                                                                      \
	__asm__ __volatile__ ( "   sr    %1,%1\n"   /* err = 0 */       \
			       "   la    4,%2\n"    /* user source */   \
			       "   sr    %0,%0\n"   /* clear result */  \
			       "   sacf  512\n"                         \
			       "0: ic    %0,0(4)\n" /* insert byte */   \
			       "   sacf  0\n"                           \
			       "1:\n"                                   \
			       ".section .fixup,\"ax\"\n"               \
			       "2: sacf  0\n"                           \
			       "   lhi   %1,%h3\n"  /* -EFAULT */       \
			       "   bras  4,3f\n"                        \
			       "   .long 1b\n"                          \
			       "3: l     4,0(4)\n"                      \
			       "   br    4\n"                           \
			       ".previous\n"                            \
			       ".section __ex_table,\"a\"\n"            \
			       "   .align 4\n"                          \
			       "   .long  0b,2b\n"                      \
			       ".previous"                              \
			       : "=d" (x) , "=&d" (err)                 \
			       : "m" (*(const __u8*)(ptr)),"K" (-EFAULT) \
			       : "cc", "4" );                           \
})
/*
 * __get_user: size-dispatched fetch from user space.  Evaluates to 0
 * on success or -EFAULT on fault; x receives the fetched value.
 * Unsupported sizes call the undefined __get_user_bad() to force a
 * link error.
 *
 * NOTE(review): statement-expression wrapper, case labels and breaks
 * were reconstructed from a garbled copy -- verify against the tree.
 */
#define __get_user(x, ptr)                                      \
({                                                              \
	__typeof__(ptr) __gu_addr = (ptr);                      \
	__typeof__(*(ptr)) __x;                                 \
	int __gu_err;                                           \
	switch (sizeof(*(__gu_addr))) {                         \
	case 1:                                                 \
		__get_user_asm_1(__x, __gu_addr, __gu_err);     \
		break;                                          \
	case 2:                                                 \
		__get_user_asm_2(__x, __gu_addr, __gu_err);     \
		break;                                          \
	case 4:                                                 \
		__get_user_asm_4(__x, __gu_addr, __gu_err);     \
		break;                                          \
	case 8:                                                 \
		__get_user_asm_8(__x, __gu_addr, __gu_err);     \
		break;                                          \
	default:                                                \
		__x = 0;                                        \
		__gu_err = __get_user_bad();                    \
		break;                                          \
	}                                                       \
	(x) = __x;                                              \
	__gu_err;                                               \
})

/* No separate checking variant: __access_ok() is constant 1 here. */
#define get_user(x, ptr)        __get_user(x, ptr)

extern int __get_user_bad(void);
/*
 * The access registers are set up so that register 4 addresses
 * secondary (user) space and register 2 addresses primary (kernel)
 * space while in access-register mode.
 */
/*
 * Copy a block of data from kernel space to user space.
 *
 * NOTE(review): reconstructed from a garbled copy.  The fault path
 * reports the whole count __n as uncopied ("err = __n"); verify this
 * and the __copy_to_user_asm return contract against the original.
 */
extern long __copy_to_user_asm(const void *from, long n, const void *to);

/* Unchecked variant: the caller has already validated the range. */
#define __copy_to_user(to, from, n)                             \
({                                                              \
	__copy_to_user_asm(from, n, to);                        \
})

/* Evaluates to 0 on success, or the number of bytes not copied. */
#define copy_to_user(to, from, n)                               \
({                                                              \
	long err = 0;                                           \
	__typeof__(n) __n = (n);                                \
	if (__access_ok(to,__n)) {                              \
		err = __copy_to_user_asm(from, __n, to);        \
		if (err)                                        \
			err = __n;                              \
	}                                                       \
	else                                                    \
		err = __n;                                      \
	err;                                                    \
})
/*
 * Copy a block of data from user space to kernel space.
 *
 * NOTE(review): reconstructed from a garbled copy.  The fault path
 * reports the whole count __n as uncopied ("err = __n"); verify this
 * and the __copy_from_user_asm return contract against the original.
 */
extern long __copy_from_user_asm(void *to, long n, const void *from);

/* Unchecked variant: the caller has already validated the range. */
#define __copy_from_user(to, from, n)                           \
({                                                              \
	__copy_from_user_asm(to, n, from);                      \
})

/* Evaluates to 0 on success, or the number of bytes not copied. */
#define copy_from_user(to, from, n)                             \
({                                                              \
	long err = 0;                                           \
	__typeof__(n) __n = (n);                                \
	if (__access_ok(from,__n)) {                            \
		err = __copy_from_user_asm(to, __n, from);      \
		if (err)                                        \
			err = __n;                              \
	}                                                       \
	else                                                    \
		err = __n;                                      \
	err;                                                    \
})
/*
 * Copy a null terminated string from userspace.
 *
 * NOTE(review): this copy of the file is garbled -- the original source
 * line numbers have been fused into the text, and most of this routine
 * is missing: the return-type line and braces, the "long res" local,
 * the copy loop between the initial "slr" and the .fixup section, the
 * fixup code itself, the __ex_table entries and the "return res".
 * The lines below are kept byte-for-byte as found; this function must
 * be restored from the original tree before building.
 */
411 __strncpy_from_user(char *dst, const char *src, long count)
/* Result register %0 is cleared here; the (missing) loop copies bytes
   until NUL or count, with registers 2/3/4 as scratch -- reg 4 reaches
   user space via access register 4 (see the clobber list).  */
414 __asm__ __volatile__ ( " slr %0,%0\n"
427 ".section .fixup,\"ax\"\n"
434 ".section __ex_table,\"a\"\n"
440 : "a" (dst), "d" (src), "d" (count),
442 : "2", "3", "4", "memory", "cc" );
447 strncpy_from_user(char *dst, const char *src, long count)
450 if (access_ok(VERIFY_READ, src, 1))
451 res = __strncpy_from_user(dst, src, count);
/*
 * Return the size of a string (including the ending 0).
 *
 * NOTE(review): garbled copy -- the original source line numbers have
 * been fused into the text, and the function braces, the scan loop
 * between the initial "alr" and the .fixup section, the fixup code,
 * the __ex_table entries and the return statement are missing.  Kept
 * byte-for-byte as found; restore from the original tree before
 * building.
 */
461 static inline unsigned long
462 strnlen_user(const char * src, unsigned long n)
/* n is biased by the src address ("alr %0,%1") before the (missing)
   scan loop; the "+&a" constraint shows n is updated in place and
   returned.  */
464 __asm__ __volatile__ (" alr %0,%1\n"
474 ".section .fixup,\"ax\"\n"
482 ".section __ex_table,\"a\"\n"
486 : "+&a" (n) : "d" (src)
490 #define strlen_user(str) strnlen_user(str, ~0UL)
/*
 * Zero a block of memory in user space, without an access check.
 * NOTE(review): presumably returns the number of bytes that could not
 * be cleared, as implied by clear_user() below -- confirm against the
 * __clear_user_asm implementation.
 */
extern long __clear_user_asm(void *to, long n);

#define __clear_user(to, n)                                     \
({                                                              \
	__clear_user_asm(to, n);                                \
})
503 static inline unsigned long
504 clear_user(void *to, unsigned long n)
506 if (access_ok(VERIFY_WRITE, to, n))
507 n = __clear_user(to, n);
#endif /* __S390_UACCESS_H */