2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1996, 1997, 1998, 1999, 2000 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
10 #define _ASM_UACCESS_H
12 #include <linux/errno.h>
13 #include <linux/sched.h>
15 #define STR(x) __STR(x)
19 * The fs value determines whether argument validity checking should be
20 * performed or not. If get_fs() == USER_DS, checking is performed; with
21 * get_fs() == KERNEL_DS, checking is bypassed.
23 * For historical reasons, these macros are grossly misnamed.
/*
 * Address-space segments.  The value is the mask handed to
 * __access_ok(): KERNEL_DS (0) makes the check always pass, USER_DS
 * (-1L) makes it test the high bits of the address range.
 * NOTE(review): VERIFY_READ is used later in this file but its #define
 * is not visible in this listing -- confirm it exists upstream.
 */
25 #define KERNEL_DS ((mm_segment_t) { (unsigned long) 0L })
26 #define USER_DS ((mm_segment_t) { (unsigned long) -1L })
29 #define VERIFY_WRITE 1
/* The active segment is kept in the current task's thread state. */
31 #define get_fs() (current->thread.current_ds)
32 #define get_ds() (KERNEL_DS)
33 #define set_fs(x) (current->thread.current_ds=(x))
/* Compare two mm_segment_t values by their .seg member. */
35 #define segment_eq(a,b) ((a).seg == (b).seg)
39 * Is an address valid? This does a straightforward calculation rather
43 * - "addr" doesn't have any high-bits set
44 * - AND "size" doesn't have any high-bits set
45 * - AND "addr+size" doesn't have any high-bits set
46 * - OR we are in kernel mode.
/*
 * __ua_size(size): contributes 0 to the check for a compile-time
 * positive constant size (addr+size already covers it); otherwise
 * folds size itself in so negative/huge sizes trip the sign test.
 */
48 #define __ua_size(size) \
49 (__builtin_constant_p(size) && (signed long) (size) > 0 ? 0 : (size))
/*
 * __access_ok(addr, size, mask): OK iff the masked OR of addr,
 * addr+size and __ua_size(size) has a clear sign bit.  With mask == 0
 * (KERNEL_DS) the result is always non-negative, i.e. no checking.
 */
51 #define __access_ok(addr,size,mask) \
52 (((signed long)((mask)&(addr | (addr + size) | __ua_size(size)))) >= 0)
/* Mask taken from the current task's segment (get_fs()). */
54 #define __access_mask ((long)(get_fs().seg))
/* Public range check; the 'type' argument is ignored on this port. */
56 #define access_ok(type,addr,size) \
57 __access_ok(((unsigned long)(addr)),(size),__access_mask)
59 static inline int verify_area(int type, const void * addr, unsigned long size)
61 return access_ok(type,addr,size) ? 0 : -EFAULT;
65 * Uh, these should become the main single-value transfer routines ...
66 * They automatically use the right size if we just have the right
69 * As MIPS uses the same address space for kernel and user data, we
70 * can just do these as direct assignments.
73 * (a) re-use the arguments for side effects (sizeof is ok)
74 * (b) require any knowledge of processes at this stage
/*
 * put_user(x, ptr): store the value x to the user address ptr with an
 * access_ok() check; expands to 0 on success or -EFAULT.
 *
 * get_user(x, ptr): read sizeof(*ptr) bytes from the user address ptr
 * into the lvalue x with an access_ok() check; expands to 0 or -EFAULT.
 *
 * Fix: x must NOT be cast in get_user().  __get_user_check() assigns
 * to its first argument ("x = (__typeof__(*(ptr))) __gu_val"), and a
 * cast expression is not an lvalue -- cast-as-lvalue was a GCC
 * extension that has long been removed.  The cast on put_user's value
 * argument is correct and kept.
 */
#define put_user(x,ptr) \
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) \
	__get_user_check((x),(ptr),sizeof(*(ptr)))
82 * The "__xxx" versions do not do address space checking, useful when
83 * doing multiple accesses to the same area (the user has to do the
84 * checks by hand with "access_ok()")
/*
 * Unchecked variants of put_user()/get_user(): no access_ok() test is
 * performed, so the caller must already have validated the range
 * (useful for repeated accesses to one checked area).
 *
 * Fix: the destination lvalue x must stay uncast in __get_user()
 * because __get_user_nocheck() assigns to it; a cast expression is not
 * an lvalue in standard C.
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
/*
 * __m(x): view address x as a large struct so the "o" (offsettable
 * memory) asm constraint is considered to cover a wide region around
 * x rather than a single word.
 */
91 struct __large_struct { unsigned long buf[100]; };
92 #define __m(x) (*(struct __large_struct *)(x))
/*
 * __get_user_nocheck(x, ptr, size): fetch a 1/2/4/8-byte value from
 * user address ptr into lvalue x with NO access_ok() check; the whole
 * expression evaluates to __gu_err (0 or -EFAULT).  Unsupported sizes
 * call the deliberately-undefined __get_user_unknown(), failing the
 * link.  The empty asm statements only mark the variables as
 * initialized for GCC.
 * NOTE(review): this listing is missing lines -- the statement-
 * expression braces, __gu_err/__gu_addr declarations and the switch
 * header are not visible; compare against pristine source.
 */
94 #define __get_user_nocheck(x,ptr,size) \
97 __typeof(*(ptr)) __gu_val; \
99 __asm__("":"=r" (__gu_val)); \
100 __gu_addr = (long) (ptr); \
101 __asm__("":"=r" (__gu_err)); \
103 case 1: __get_user_asm("lb"); break; \
104 case 2: __get_user_asm("lh"); break; \
105 case 4: __get_user_asm("lw"); break; \
106 case 8: __get_user_asm("ld"); break; \
107 default: __get_user_unknown(); break; \
108 } x = (__typeof__(*(ptr))) __gu_val; __gu_err; \
/*
 * __get_user_check(x, ptr, size): like __get_user_nocheck() but first
 * validates the range with __access_ok(); when the check fails the
 * switch is skipped and __gu_err (presumably preset to -EFAULT on a
 * line missing from this listing) is the result.
 * NOTE(review): braces, declarations and the switch header are among
 * the lines missing here.
 */
111 #define __get_user_check(x,ptr,size) \
114 __typeof__(*(ptr)) __gu_val; \
116 __asm__("":"=r" (__gu_val)); \
117 __gu_addr = (long) (ptr); \
118 __asm__("":"=r" (__gu_err)); \
119 if (__access_ok(__gu_addr,size,__access_mask)) { \
121 case 1: __get_user_asm("lb"); break; \
122 case 2: __get_user_asm("lh"); break; \
123 case 4: __get_user_asm("lw"); break; \
124 case 8: __get_user_asm("ld"); break; \
125 default: __get_user_unknown(); break; \
127 } x = (__typeof__(*(ptr))) __gu_val; __gu_err; \
/*
 * __get_user_asm(insn): emit the load at label 1 with an exception
 * fixup: the __ex_table entry (".dword 1b,3b") maps a fault at 1: to
 * the .fixup stub at 3:, which loads -EFAULT (operand %3) into
 * __gu_err (operand %0).
 * NOTE(review): lines missing from this listing (e.g. the jump back
 * past the load and the .previous section switches).
 */
130 #define __get_user_asm(insn) \
132 __asm__ __volatile__( \
133 "1:\t" insn "\t%1,%2\n\t" \
136 ".section\t.fixup,\"ax\"\n" \
137 "3:\tli\t%0,%3\n\t" \
141 ".section\t__ex_table,\"a\"\n\t" \
142 ".dword\t1b,3b\n\t" \
144 :"=r" (__gu_err), "=r" (__gu_val) \
145 :"o" (__m(__gu_addr)), "i" (-EFAULT)); \

/* Deliberately undefined: referencing it fails the link for bad sizes. */
148 extern void __get_user_unknown(void);
/*
 * __put_user_nocheck(x, ptr, size): store a 1/2/4/8-byte value to
 * user address ptr with NO access_ok() check; evaluates to 0 or
 * -EFAULT.  Unsupported sizes fail the link via __put_user_unknown().
 * NOTE(review): lines missing from this listing (statement-expression
 * braces, __pu_err/__pu_addr declarations, __pu_val assignment, the
 * switch header and the closing).
 */
150 #define __put_user_nocheck(x,ptr,size) \
153 __typeof__(*(ptr)) __pu_val; \
156 __pu_addr = (long) (ptr); \
157 __asm__("":"=r" (__pu_err)); \
159 case 1: __put_user_asm("sb"); break; \
160 case 2: __put_user_asm("sh"); break; \
161 case 4: __put_user_asm("sw"); break; \
162 case 8: __put_user_asm("sd"); break; \
163 default: __put_user_unknown(); break; \
/*
 * __put_user_check(x, ptr, size): store with an __access_ok() range
 * check first; evaluates to 0 or -EFAULT.
 * NOTE(review): lines missing from this listing (braces, __pu_err
 * setup, switch header, closing).
 */
167 #define __put_user_check(x,ptr,size) \
170 __typeof__(*(ptr)) __pu_val; \
173 __pu_addr = (long) (ptr); \
174 __asm__("":"=r" (__pu_err)); \
175 if (__access_ok(__pu_addr,size,__access_mask)) { \
177 case 1: __put_user_asm("sb"); break; \
178 case 2: __put_user_asm("sh"); break; \
179 case 4: __put_user_asm("sw"); break; \
180 case 8: __put_user_asm("sd"); break; \
181 default: __put_user_unknown(); break; \
/*
 * __put_user_asm(insn): emit the store at label 1 with exception-table
 * fixup; on fault the .fixup stub at 3: loads -EFAULT (%3) into
 * __pu_err (%0).  The "Jr"/%z1 operand lets a constant-zero value be
 * emitted as register $0 (per GCC MIPS constraint docs).
 * NOTE(review): lines missing from this listing (jump back, .previous
 * directives, the output operand list).
 */
186 #define __put_user_asm(insn) \
188 __asm__ __volatile__( \
189 "1:\t" insn "\t%z1, %2\t\t\t# __put_user_asm\n\t" \
192 ".section\t.fixup,\"ax\"\n" \
193 "3:\tli\t%0,%3\n\t" \
196 ".section\t__ex_table,\"a\"\n\t" \
197 ".dword\t1b,3b\n\t" \
200 :"Jr" (__pu_val), "o" (__m(__pu_addr)), "i" (-EFAULT)); \

/* Deliberately undefined: link-time error for unsupported sizes. */
203 extern void __put_user_unknown(void);
/*
 * __MODULE_JAL(destination): call an out-of-line helper.  The first
 * variant loads the address into $1 with dla (for callees that may be
 * outside jal's reach, e.g. modules); the second is a plain jal.
 * NOTE(review): two competing definitions appear here -- the
 * surrounding #ifdef MODULE / #else / #endif lines (and the jalr after
 * the dla) are missing from this listing.
 */
206 * We're generating jump to subroutines which will be outside the range of
210 #define __MODULE_JAL(destination) \
212 "dla\t$1, " #destination "\n\t" \
216 #define __MODULE_JAL(destination) \
217 "jal\t" #destination "\n\t"
/* Out-of-line asm copy helper shared by the copy_*_user() macros.
 * Presumably returns the number of bytes NOT copied (the usual kernel
 * contract) -- confirm against the asm source. */
220 extern size_t __copy_user(void *__to, const void *__from, size_t __n);

/*
 * __invoke_copy_to_user(to, from, n): pin the arguments to the MIPS
 * argument registers $4/$5/$6 and call __copy_user, declaring the
 * registers the helper clobbers.
 * NOTE(review): lines missing from this listing (statement-expression
 * braces, the __cu_to_r assignment, and the result expression).
 */
222 #define __invoke_copy_to_user(to,from,n) \
224 register void *__cu_to_r __asm__ ("$4"); \
225 register const void *__cu_from_r __asm__ ("$5"); \
226 register long __cu_len_r __asm__ ("$6"); \
229 __cu_from_r = (from); \
231 __asm__ __volatile__( \
232 __MODULE_JAL(__copy_user) \
233 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
235 : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
/*
 * __copy_to_user(to, from, n): unchecked copy into user space; the
 * caller must have done access_ok().  Evaluates to __copy_user()'s
 * return value.
 * NOTE(review): lines missing from this listing (braces, __cu_to and
 * __cu_len declarations, closing).
 */
240 #define __copy_to_user(to,from,n) \
243 const void *__cu_from; \
247 __cu_from = (from); \
249 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
/*
 * copy_to_user(to, from, n): checked copy into user space -- the copy
 * only runs if access_ok(VERIFY_WRITE, ...) passes.
 * NOTE(review): lines missing from this listing (braces, __cu_to and
 * __cu_len declarations, the continuation of the call, the closing).
 */
253 #define copy_to_user(to,from,n) \
256 const void *__cu_from; \
260 __cu_from = (from); \
262 if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) \
263 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
/*
 * __invoke_copy_from_user(to, from, n): like the to-user variant, but
 * wrapped in .set noreorder so instruction placement is exact; the
 * daddu computes from + len (%1 + %2) into $1, apparently for the
 * helper's fault-fixup path -- confirm against the asm source.
 * NOTE(review): lines missing from this listing (braces, the
 * __cu_to_r assignment, delay-slot instruction, result expression).
 */
268 #define __invoke_copy_from_user(to,from,n) \
270 register void *__cu_to_r __asm__ ("$4"); \
271 register const void *__cu_from_r __asm__ ("$5"); \
272 register long __cu_len_r __asm__ ("$6"); \
275 __cu_from_r = (from); \
277 __asm__ __volatile__( \
278 ".set\tnoreorder\n\t" \
279 __MODULE_JAL(__copy_user) \
281 "daddu\t$1, %1, %2\n\t" \
283 ".set\treorder\n\t" \
285 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
287 : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
/*
 * __copy_from_user(to, from, n): unchecked copy from user space; the
 * caller must have done access_ok().
 * NOTE(review): lines missing from this listing (braces, declarations,
 * call continuation, closing).
 */
292 #define __copy_from_user(to,from,n) \
295 const void *__cu_from; \
299 __cu_from = (from); \
301 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
/*
 * copy_from_user(to, from, n): checked copy from user space -- the
 * copy only runs if access_ok(VERIFY_READ, ...) passes.
 * NOTE(review): lines missing from this listing (braces, declarations,
 * call continuation, closing; whether the destination is zeroed on a
 * failed check is not visible).
 */
306 #define copy_from_user(to,from,n) \
309 const void *__cu_from; \
313 __cu_from = (from); \
315 if (access_ok(VERIFY_READ, __cu_from, __cu_len)) \
316 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
/*
 * __clear_user(addr, size): zero a user-space range via the __bzero
 * asm helper, with no access check.
 * NOTE(review): lines missing from this listing (the result variable,
 * the argument-register setup in the asm template, the output operand
 * list and the return statement).
 */
321 static inline __kernel_size_t
322 __clear_user(void *addr, __kernel_size_t size)
326 __asm__ __volatile__(
330 __MODULE_JAL(__bzero)
333 : "r" (addr), "r" (size)
334 : "$4", "$5", "$6", "$8", "$9", "$31");
/*
 * clear_user(addr, n): zero n bytes at user address addr after a range
 * check.  Evaluates to __clear_user()'s return value (0 once fully
 * cleared; the unprocessed count otherwise -- confirm against the
 * __bzero helper's contract), or to n unchanged if the check fails.
 *
 * Fix: the original invoked __access_ok(VERIFY_WRITE, addr, size),
 * but __access_ok()'s parameters are (addr, size, mask) -- so
 * VERIFY_WRITE landed in the address slot and the real address was
 * used as the size.  Use access_ok() with its documented argument
 * order instead.  (The statement-expression closing, missing from the
 * mangled listing, is restored to match the sibling copy_*_user
 * macros.)
 */
#define clear_user(addr,n) \
({ \
	void * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && access_ok(VERIFY_WRITE, __cl_addr, __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; \
})
350 * Returns: -EFAULT if exception before terminator, N if the entire
351 * buffer filled, else strlen.
/*
 * __strncpy_from_user: copy a NUL-terminated string from user space
 * via the out-of-line __strncpy_from_user_nocheck_asm helper; no
 * access_ok() check is performed here.
 * NOTE(review): the function's opening lines (return type, result
 * variable, register setup) and closing are missing from this listing.
 */
354 __strncpy_from_user(char *__to, const char *__from, long __len)
358 __asm__ __volatile__(
362 __MODULE_JAL(__strncpy_from_user_nocheck_asm)
365 : "r" (__to), "r" (__from), "r" (__len)
366 : "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");
/*
 * strncpy_from_user: checked variant -- presumably the
 * __strncpy_from_user_asm helper performs the access verification
 * itself (confirm against the asm source).
 * NOTE(review): opening and closing lines are missing from this
 * listing.
 */
372 strncpy_from_user(char *__to, const char *__from, long __len)
376 __asm__ __volatile__(
380 __MODULE_JAL(__strncpy_from_user_asm)
383 : "r" (__to), "r" (__from), "r" (__len)
384 : "$2", "$3", "$4", "$5", "$6", "$8", "$31", "memory");
389 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/*
 * __strlen_user: unchecked strlen of a user-space string via the
 * __strlen_user_nocheck_asm helper.
 * NOTE(review): body lines (result variable, input operand list,
 * return statement) are missing from this listing.
 */
390 static inline long __strlen_user(const char *s)
394 __asm__ __volatile__(
396 __MODULE_JAL(__strlen_user_nocheck_asm)
400 : "$2", "$4", "$8", "$31");
/*
 * strlen_user: checked variant -- presumably __strlen_user_asm
 * validates the address itself (confirm against the asm source).
 * NOTE(review): body lines are missing from this listing.
 */
405 static inline long strlen_user(const char *s)
409 __asm__ __volatile__(
411 __MODULE_JAL(__strlen_user_asm)
415 : "$2", "$4", "$8", "$31");
420 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/*
 * __strnlen_user: length-bounded, unchecked variant via
 * __strnlen_user_nocheck_asm.
 * NOTE(review): body lines are missing from this listing.
 */
421 static inline long __strnlen_user(const char *s, long n)
425 __asm__ __volatile__(
428 __MODULE_JAL(__strnlen_user_nocheck_asm)
432 : "$2", "$4", "$5", "$8", "$31");
/*
 * strnlen_user: length-bounded, checked variant via
 * __strnlen_user_asm.
 * NOTE(review): body lines are missing from this listing.
 */
437 static inline long strnlen_user(const char *s, long n)
441 __asm__ __volatile__(
444 __MODULE_JAL(__strnlen_user_asm)
448 : "$2", "$4", "$5", "$8", "$31");
/*
 * Exception table entry: pairs a potentially-faulting instruction
 * address with its fixup target (nextinsn), consumed by the fault
 * handler via search_exception_table().
 * NOTE(review): the struct's opening brace and its first member (the
 * faulting-insn field) are missing from this listing.
 */
453 struct exception_table_entry
456 unsigned long nextinsn;

459 /* Returns 0 if exception not found and fixup.unit otherwise. */
460 extern unsigned long search_exception_table(unsigned long addr);
462 /* Returns the new pc */
/*
 * fixup_exception(map_reg, fixup_unit, pc): computes the resume pc for
 * a handled fault.
 * NOTE(review): the macro body (continuation lines) is entirely
 * missing from this listing.
 */
463 #define fixup_exception(map_reg, fixup_unit, pc) \
468 #endif /* _ASM_UACCESS_H */