/*
 * Authors:    Bjorn Wesen (bjornw@axis.com)
 *	       Hans-Peter Nilsson (hp@axis.com)
 *
 * Revision 1.1.1.1  2005/04/11 02:50:52  jack
 *
 * Revision 1.1.1.1  2005/01/10 13:16:55  jack
 *
 * Revision 1.9  2002/11/20 18:20:17  hp
 * Make all static inline functions extern inline.
 *
 * Revision 1.8  2001/10/29 13:01:48  bjornw
 * Removed unused variable tmp2 in strnlen_user
 *
 * Revision 1.7  2001/10/02 12:44:52  hp
 * Add support for 64-bit put_user/get_user
 *
 * Revision 1.6  2001/10/01 14:51:17  bjornw
 * Added register prefixes and removed underscores
 *
 * Revision 1.5  2000/10/25 03:33:21  hp
 * - Provide implementation for everything else but get_user and put_user;
 *   copying inline to/from user for constant length 0..16, 20, 24, and
 *   clearing for 0..4, 8, 12, 16, 20, 24, strncpy_from_user and strnlen_user
 * - Constraints for destination addr in get_user cannot be memory, only reg.
 * - Correct labels for PC at expected fault points.
 * - Nits with assembly code.
 * - Don't use statement expressions without value; use "do {} while (0)".
 * - Return correct values from __generic_... functions.
 *
 * Revision 1.4  2000/09/12 16:28:25  bjornw
 * * Removed comments from the get/put user asm code
 * * Constraints for destination addr in put_user cannot be memory, only reg
 *
 * Revision 1.3  2000/09/12 14:30:20  bjornw
 * MAX_ADDR_USER does not exist anymore
 *
 * Revision 1.2  2000/07/13 15:52:48  bjornw
 * New user-access functions
 *
 * Revision 1.1.1.1  2000/07/10 16:32:31  bjornw
 * CRIS architecture, working draft
 */
/* Asm:s have been tweaked (within the domain of correctness) to give
   satisfactory results for "gcc version 2.96 20000427 (experimental)".

   Register $r9 is chosen for temporaries, being a call-clobbered register
   first in line to be used (notably for local blocks), not colliding with
   parameter registers.  */
#ifndef _CRIS_UACCESS_H
#define _CRIS_UACCESS_H

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/processor.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

/* addr_limit is the maximum accessible address for the task. we misuse
 * the KERNEL_DS and USER_DS values to both assign and compare the
 * addr_limit values through the equally misnamed get/set_fs macros.
 */
#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->addr_limit)
#define set_fs(x)	(current->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
#define __user_ok(addr,size) (((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE - (size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
extern inline int verify_area(int type, const void *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}
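/*
 * Sketch of the intended calling pattern (the surrounding names are made
 * up for illustration): check a whole user range once up front, then do
 * the actual accesses with the unchecked __xxx variants defined below.
 *
 *	if (verify_area(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	...
 *	__copy_from_user(kbuf, ubuf, len);
 */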
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table(unsigned long);
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * CRIS, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
#define get_user(x,ptr) \
	__get_user_check((x),(ptr),sizeof(*(ptr)))
#define put_user(x,ptr) \
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
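/*
 * Usage sketch (uptr is an assumed user-space pointer, e.g. a syscall
 * argument): the macro value is 0 on success and -EFAULT on a fault,
 * and the access size is taken from the pointer type - here 4 bytes:
 *
 *	int val;
 *	if (get_user(val, (int *)uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int *)uptr))
 *		return -EFAULT;
 */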
/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens.  These macros can be optimized for
 * the case of just returning from the function in which xxx_ret is used.
 */
#define put_user_ret(x,ptr,ret) \
	do { if (put_user(x,ptr)) return ret; } while (0)

#define get_user_ret(x,ptr,ret) \
	do { if (get_user(x,ptr)) return ret; } while (0)

#define __put_user_ret(x,ptr,ret) \
	do { if (__put_user(x,ptr)) return ret; } while (0)

#define __get_user_ret(x,ptr,ret) \
	do { if (__get_user(x,ptr)) return ret; } while (0)
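/*
 * I.e. (illustrative only), inside a function returning an error code,
 *
 *	get_user_ret(val, uptr, -EFAULT);
 *
 * is shorthand for
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */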
extern long __put_user_bad(void);

#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__put_user_size((x),(ptr),(size),__pu_err);	\
	__pu_err;					\
})
#define __put_user_check(x,ptr,size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
	if (access_ok(VERIFY_WRITE,__pu_addr,size))		\
		__put_user_size((x),__pu_addr,(size),__pu_err);	\
	__pu_err;						\
})
#define __put_user_size(x,ptr,size,retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	  case 1: __put_user_asm(x,ptr,retval,"move.b"); break;	\
	  case 2: __put_user_asm(x,ptr,retval,"move.w"); break;	\
	  case 4: __put_user_asm(x,ptr,retval,"move.d"); break;	\
	  case 8: __put_user_asm_64(x,ptr,retval); break;	\
	  default: __put_user_bad();				\
	}							\
} while (0)
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * Note that PC at a fault is the address *after* the faulting
 * instruction.
 */
#define __put_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"	"op" %1,[%2]\n"			\
		"2:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"3:	move.d %3,%0\n"			\
		"	jump 2b\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.dword 2b,3b\n"			\
		"	.previous\n"			\
		: "=r" (err)				\
		: "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
#define __put_user_asm_64(x, addr, err)			\
	__asm__ __volatile__(				\
		"	move.d %M1,[%2]\n"		\
		"2:	move.d %H1,[%2+4]\n"		\
		"4:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"3:	move.d %3,%0\n"			\
		"	jump 4b\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.dword 2b,3b\n"			\
		"	.dword 4b,3b\n"			\
		"	.previous\n"			\
		: "=r" (err)				\
		: "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
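/*
 * For illustration: put_user(x, (short *)p) resolves, via the size-2 arm
 * of __put_user_size above, to __put_user_asm(x, p, err, "move.w") - a
 * single "move.w %1,[%2]" whose __ex_table entry sends the post-fault PC
 * to the fixup code that loads -EFAULT into err.
 */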
#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})
#define __get_user_check(x,ptr,size)					\
({									\
	long __gu_err = -EFAULT, __gu_val = 0;				\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
	if (access_ok(VERIFY_READ,__gu_addr,size))			\
		__get_user_size(__gu_val,__gu_addr,(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;				\
	__gu_err;							\
})
extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	  case 1: __get_user_asm(x,ptr,retval,"move.b"); break;	\
	  case 2: __get_user_asm(x,ptr,retval,"move.w"); break;	\
	  case 4: __get_user_asm(x,ptr,retval,"move.d"); break;	\
	  case 8: __get_user_asm_64(x,ptr,retval); break;	\
	  default: (x) = __get_user_bad();			\
	}							\
} while (0)
/* See comment before __put_user_asm.  */
#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"	"op" [%2],%1\n"			\
		"2:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"3:	move.d %3,%0\n"			\
		"	moveq 0,%1\n"			\
		"	jump 2b\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.dword 2b,3b\n"			\
		"	.previous\n"			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "g" (-EFAULT), "0" (err))
#define __get_user_asm_64(x, addr, err)			\
	__asm__ __volatile__(				\
		"	move.d [%2],%M1\n"		\
		"2:	move.d [%2+4],%H1\n"		\
		"4:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"3:	move.d %3,%0\n"			\
		"	moveq 0,%M1\n"			\
		"	moveq 0,%H1\n"			\
		"	jump 4b\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.dword 2b,3b\n"			\
		"	.dword 4b,3b\n"			\
		"	.previous\n"			\
		: "=r" (err), "=r" (x)			\
		: "r" (addr), "g" (-EFAULT), "0" (err))
/* More complex functions.  Most are inline, but some call functions that
   live in lib/usercopy.c  */

extern unsigned long __copy_user(void *to, const void *from, unsigned long n);
extern unsigned long __copy_user_zeroing(void *to, const void *from, unsigned long n);
extern unsigned long __do_clear_user(void *to, unsigned long n);
/*
 * Copy a null terminated string from userspace.
 *
 * Must return:
 * -EFAULT		for an exception
 * count		if we hit the buffer limit
 * bytes copied		if we hit a null byte
 * (without the null byte)
 */
extern inline long
__do_strncpy_from_user(char *dst, const char *src, long count)
{
	long res;

	if (count == 0)
		return 0;

	/*
	 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
	 * So do we.
	 *
	 * This code is deduced from:
	 *
	 *	char tmp2;
	 *	long tmp1;
	 *	tmp1 = count;
	 *	while ((*dst++ = (tmp2 = *src++)) != 0
	 *	       && --tmp1)
	 *	  ;
	 *
	 *	res = count - tmp1;
	 *
	 * (with tweaks for the fault cases)
	 */
	__asm__ __volatile__ (
		"	move.d %3,%0\n"
		"	move.b [%2+],$r9\n"
		"1:	beq 2f\n"
		"	move.b $r9,[%1+]\n"

		"	subq 1,%0\n"
		"	bne 1b\n"
		"	move.b [%2+],$r9\n"

		"2:	sub.d %3,%0\n"
		"	neg.d %0,%0\n"
		"3:\n"
		"	.section .fixup,\"ax\"\n"
		"4:	move.d %7,%0\n"
		"	jump 3b\n"

		/* There's one address for a fault at the first move, and
		   two possible PC values for a fault at the second move,
		   being a delay-slot filler.  However, the branch-target
		   for the second move is the same as the first address.
		   Just so you don't get confused...  */
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.dword 1b,4b\n"
		"	.dword 2b,4b\n"
		"	.previous"
		: "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
		: "3" (count), "1" (dst), "2" (src), "g" (-EFAULT)
		: "r9");

	return res;
}
extern inline unsigned long
__generic_copy_to_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_user(to,from,n);
	return n;
}

extern inline unsigned long
__generic_copy_from_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		return __copy_user_zeroing(to,from,n);
	return n;
}

extern inline unsigned long
__generic_clear_user(void *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __do_clear_user(to,n);
	return n;
}
extern inline long
__strncpy_from_user(char *dst, const char *src, long count)
{
	return __do_strncpy_from_user(dst, src, count);
}

extern inline long
strncpy_from_user(char *dst, const char *src, long count)
{
	long res = -EFAULT;

	if (access_ok(VERIFY_READ, src, 1))
		res = __do_strncpy_from_user(dst, src, count);
	return res;
}
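/*
 * Typical use (sketch; uname is an assumed user-space string pointer):
 *
 *	char buf[32];
 *	long len = strncpy_from_user(buf, uname, sizeof(buf));
 *	if (len < 0)
 *		return len;		(-EFAULT)
 *	if (len == sizeof(buf))
 *		return -ENAMETOOLONG;	(no null byte within the limit)
 */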
/* A few copy asms to build up the more complex ones from.

   Note again, a post-increment is performed regardless of whether a bus
   fault occurred in that instruction, and PC for a faulted insn is the
   address *after* the insn.  */
#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm__ __volatile__ (				\
			COPY				\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
			FIXUP				\
		"	jump 1b\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
			TENTRY				\
		"	.previous\n"			\
		: "=r" (to), "=r" (from), "=r" (ret)	\
		: "0" (to), "1" (from), "2" (ret)	\
		: "r9")

#define __asm_copy_from_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"	\
		"2:	move.b $r9,[%0+]\n",	\
		"3:	addq 1,%2\n"		\
		"	clear.b [%0+]\n",	\
		"	.dword 2b,3b\n")

#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
		"	move.w [%1+],$r9\n"		\
		"2:	move.w $r9,[%0+]\n" COPY,	\
		"3:	addq 2,%2\n"			\
		"	clear.w [%0+]\n" FIXUP,		\
		"	.dword 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_3(to, from, ret)		\
	__asm_copy_from_user_2x_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"		\
		"4:	move.b $r9,[%0+]\n",		\
		"5:	addq 1,%2\n"			\
		"	clear.b [%0+]\n",		\
		"	.dword 4b,5b\n")

#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
		"	move.d [%1+],$r9\n"		\
		"2:	move.d $r9,[%0+]\n" COPY,	\
		"3:	addq 4,%2\n"			\
		"	clear.d [%0+]\n" FIXUP,		\
		"	.dword 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_5(to, from, ret)		\
	__asm_copy_from_user_4x_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"		\
		"4:	move.b $r9,[%0+]\n",		\
		"5:	addq 1,%2\n"			\
		"	clear.b [%0+]\n",		\
		"	.dword 4b,5b\n")

#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_4x_cont(to, from, ret,	\
		"	move.w [%1+],$r9\n"		\
		"4:	move.w $r9,[%0+]\n" COPY,	\
		"5:	addq 2,%2\n"			\
		"	clear.w [%0+]\n" FIXUP,		\
		"	.dword 4b,5b\n" TENTRY)

#define __asm_copy_from_user_6(to, from, ret) \
	__asm_copy_from_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_7(to, from, ret)		\
	__asm_copy_from_user_6x_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"		\
		"6:	move.b $r9,[%0+]\n",		\
		"7:	addq 1,%2\n"			\
		"	clear.b [%0+]\n",		\
		"	.dword 6b,7b\n")

#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_4x_cont(to, from, ret,	\
		"	move.d [%1+],$r9\n"		\
		"4:	move.d $r9,[%0+]\n" COPY,	\
		"5:	addq 4,%2\n"			\
		"	clear.d [%0+]\n" FIXUP,		\
		"	.dword 4b,5b\n" TENTRY)

#define __asm_copy_from_user_8(to, from, ret) \
	__asm_copy_from_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_9(to, from, ret)		\
	__asm_copy_from_user_8x_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"		\
		"6:	move.b $r9,[%0+]\n",		\
		"7:	addq 1,%2\n"			\
		"	clear.b [%0+]\n",		\
		"	.dword 6b,7b\n")

#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_8x_cont(to, from, ret,	\
		"	move.w [%1+],$r9\n"		\
		"6:	move.w $r9,[%0+]\n" COPY,	\
		"7:	addq 2,%2\n"			\
		"	clear.w [%0+]\n" FIXUP,		\
		"	.dword 6b,7b\n" TENTRY)

#define __asm_copy_from_user_10(to, from, ret) \
	__asm_copy_from_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_11(to, from, ret)		\
	__asm_copy_from_user_10x_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"		\
		"8:	move.b $r9,[%0+]\n",		\
		"9:	addq 1,%2\n"			\
		"	clear.b [%0+]\n",		\
		"	.dword 8b,9b\n")

#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_8x_cont(to, from, ret,	\
		"	move.d [%1+],$r9\n"		\
		"6:	move.d $r9,[%0+]\n" COPY,	\
		"7:	addq 4,%2\n"			\
		"	clear.d [%0+]\n" FIXUP,		\
		"	.dword 6b,7b\n" TENTRY)

#define __asm_copy_from_user_12(to, from, ret) \
	__asm_copy_from_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_13(to, from, ret)		\
	__asm_copy_from_user_12x_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"		\
		"8:	move.b $r9,[%0+]\n",		\
		"9:	addq 1,%2\n"			\
		"	clear.b [%0+]\n",		\
		"	.dword 8b,9b\n")

#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_12x_cont(to, from, ret,	\
		"	move.w [%1+],$r9\n"		\
		"8:	move.w $r9,[%0+]\n" COPY,	\
		"9:	addq 2,%2\n"			\
		"	clear.w [%0+]\n" FIXUP,		\
		"	.dword 8b,9b\n" TENTRY)

#define __asm_copy_from_user_14(to, from, ret) \
	__asm_copy_from_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_15(to, from, ret)		\
	__asm_copy_from_user_14x_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"		\
		"10:	move.b $r9,[%0+]\n",		\
		"11:	addq 1,%2\n"			\
		"	clear.b [%0+]\n",		\
		"	.dword 10b,11b\n")

#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_12x_cont(to, from, ret,	\
		"	move.d [%1+],$r9\n"		\
		"8:	move.d $r9,[%0+]\n" COPY,	\
		"9:	addq 4,%2\n"			\
		"	clear.d [%0+]\n" FIXUP,		\
		"	.dword 8b,9b\n" TENTRY)

#define __asm_copy_from_user_16(to, from, ret) \
	__asm_copy_from_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_16x_cont(to, from, ret,	\
		"	move.d [%1+],$r9\n"		\
		"10:	move.d $r9,[%0+]\n" COPY,	\
		"11:	addq 4,%2\n"			\
		"	clear.d [%0+]\n" FIXUP,		\
		"	.dword 10b,11b\n" TENTRY)

#define __asm_copy_from_user_20(to, from, ret) \
	__asm_copy_from_user_20x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_20x_cont(to, from, ret,	\
		"	move.d [%1+],$r9\n"		\
		"12:	move.d $r9,[%0+]\n" COPY,	\
		"13:	addq 4,%2\n"			\
		"	clear.d [%0+]\n" FIXUP,		\
		"	.dword 12b,13b\n" TENTRY)

#define __asm_copy_from_user_24(to, from, ret) \
	__asm_copy_from_user_24x_cont(to, from, ret, "", "", "")
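/*
 * To see how the chaining composes, __asm_copy_from_user_3 expands (via
 * __asm_copy_from_user_2x_cont) to a word copy plus a byte copy, with the
 * fixup snippets concatenated so that a fault in the word move falls
 * through both addq:s (2+1 bytes reported uncopied, both parts cleared),
 * while a fault in the trailing byte move runs only the last one:
 *
 *	COPY:	move.w [%1+],$r9 / 2: move.w $r9,[%0+]
 *		move.b [%1+],$r9 / 4: move.b $r9,[%0+]
 *	FIXUP:	3: addq 2,%2 / clear.w [%0+]
 *		5: addq 1,%2 / clear.b [%0+]
 *	TENTRY:	.dword 2b,3b / .dword 4b,5b
 */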
/* And now, the to-user ones.  */

#define __asm_copy_to_user_1(to, from, ret)	\
	__asm_copy_user_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"	\
		"	move.b $r9,[%0+]\n2:\n",	\
		"3:	addq 1,%2\n",		\
		"	.dword 2b,3b\n")

#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
		"	move.w [%1+],$r9\n"		\
		"	move.w $r9,[%0+]\n2:\n" COPY,	\
		"3:	addq 2,%2\n" FIXUP,		\
		"	.dword 2b,3b\n" TENTRY)

#define __asm_copy_to_user_2(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_3(to, from, ret)		\
	__asm_copy_to_user_2x_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"		\
		"	move.b $r9,[%0+]\n4:\n",	\
		"5:	addq 1,%2\n",			\
		"	.dword 4b,5b\n")

#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
		"	move.d [%1+],$r9\n"		\
		"	move.d $r9,[%0+]\n2:\n" COPY,	\
		"3:	addq 4,%2\n" FIXUP,		\
		"	.dword 2b,3b\n" TENTRY)

#define __asm_copy_to_user_4(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret)		\
	__asm_copy_to_user_4x_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"		\
		"	move.b $r9,[%0+]\n4:\n",	\
		"5:	addq 1,%2\n",			\
		"	.dword 4b,5b\n")

#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret,	\
		"	move.w [%1+],$r9\n"		\
		"	move.w $r9,[%0+]\n4:\n" COPY,	\
		"5:	addq 2,%2\n" FIXUP,		\
		"	.dword 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret)		\
	__asm_copy_to_user_6x_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"		\
		"	move.b $r9,[%0+]\n6:\n",	\
		"7:	addq 1,%2\n",			\
		"	.dword 6b,7b\n")

#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret,	\
		"	move.d [%1+],$r9\n"		\
		"	move.d $r9,[%0+]\n4:\n" COPY,	\
		"5:	addq 4,%2\n" FIXUP,		\
		"	.dword 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_9(to, from, ret)		\
	__asm_copy_to_user_8x_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"		\
		"	move.b $r9,[%0+]\n6:\n",	\
		"7:	addq 1,%2\n",			\
		"	.dword 6b,7b\n")

#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret,	\
		"	move.w [%1+],$r9\n"		\
		"	move.w $r9,[%0+]\n6:\n" COPY,	\
		"7:	addq 2,%2\n" FIXUP,		\
		"	.dword 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret)		\
	__asm_copy_to_user_10x_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"		\
		"	move.b $r9,[%0+]\n8:\n",	\
		"9:	addq 1,%2\n",			\
		"	.dword 8b,9b\n")

#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret,	\
		"	move.d [%1+],$r9\n"		\
		"	move.d $r9,[%0+]\n6:\n" COPY,	\
		"7:	addq 4,%2\n" FIXUP,		\
		"	.dword 6b,7b\n" TENTRY)

#define __asm_copy_to_user_12(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret)		\
	__asm_copy_to_user_12x_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"		\
		"	move.b $r9,[%0+]\n8:\n",	\
		"9:	addq 1,%2\n",			\
		"	.dword 8b,9b\n")

#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret,	\
		"	move.w [%1+],$r9\n"		\
		"	move.w $r9,[%0+]\n8:\n" COPY,	\
		"9:	addq 2,%2\n" FIXUP,		\
		"	.dword 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret)		\
	__asm_copy_to_user_14x_cont(to, from, ret,	\
		"	move.b [%1+],$r9\n"		\
		"	move.b $r9,[%0+]\n10:\n",	\
		"11:	addq 1,%2\n",			\
		"	.dword 10b,11b\n")

#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret,	\
		"	move.d [%1+],$r9\n"		\
		"	move.d $r9,[%0+]\n8:\n" COPY,	\
		"9:	addq 4,%2\n" FIXUP,		\
		"	.dword 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
	__asm_copy_to_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_16x_cont(to, from, ret,	\
		"	move.d [%1+],$r9\n"		\
		"	move.d $r9,[%0+]\n10:\n" COPY,	\
		"11:	addq 4,%2\n" FIXUP,		\
		"	.dword 10b,11b\n" TENTRY)

#define __asm_copy_to_user_20(to, from, ret) \
	__asm_copy_to_user_20x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_20x_cont(to, from, ret,	\
		"	move.d [%1+],$r9\n"		\
		"	move.d $r9,[%0+]\n12:\n" COPY,	\
		"13:	addq 4,%2\n" FIXUP,		\
		"	.dword 12b,13b\n" TENTRY)

#define __asm_copy_to_user_24(to, from, ret) \
	__asm_copy_to_user_24x_cont(to, from, ret, "", "", "")
/* Define a few clearing asms with exception handlers.  */

/* This frame-asm is like the __asm_copy_user_cont one, but has one less
   input/output argument, there being no source to copy from.  */

#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm__ __volatile__ (				\
			CLEAR				\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
			FIXUP				\
		"	jump 1b\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
			TENTRY				\
		"	.previous"			\
		: "=r" (to), "=r" (ret)			\
		: "0" (to), "1" (ret))

#define __asm_clear_1(to, ret) \
	__asm_clear(to, ret,			\
		"	clear.b [%0+]\n2:\n",	\
		"3:	addq 1,%1\n",		\
		"	.dword 2b,3b\n")

#define __asm_clear_2(to, ret) \
	__asm_clear(to, ret,			\
		"	clear.w [%0+]\n2:\n",	\
		"3:	addq 2,%1\n",		\
		"	.dword 2b,3b\n")

#define __asm_clear_3(to, ret) \
	__asm_clear(to, ret,			\
		"	clear.w [%0+]\n"	\
		"2:	clear.b [%0+]\n3:\n",	\
		"4:	addq 2,%1\n"		\
		"5:	addq 1,%1\n",		\
		"	.dword 2b,4b\n"		\
		"	.dword 3b,5b\n")

#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear(to, ret,				\
		"	clear.d [%0+]\n2:\n" CLEAR,	\
		"3:	addq 4,%1\n" FIXUP,		\
		"	.dword 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
	__asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_4x_cont(to, ret,			\
		"	clear.d [%0+]\n4:\n" CLEAR,	\
		"5:	addq 4,%1\n" FIXUP,		\
		"	.dword 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
	__asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_8x_cont(to, ret,			\
		"	clear.d [%0+]\n6:\n" CLEAR,	\
		"7:	addq 4,%1\n" FIXUP,		\
		"	.dword 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
	__asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_12x_cont(to, ret,			\
		"	clear.d [%0+]\n8:\n" CLEAR,	\
		"9:	addq 4,%1\n" FIXUP,		\
		"	.dword 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
	__asm_clear_16x_cont(to, ret, "", "", "")

#define __asm_clear_20x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_16x_cont(to, ret,			\
		"	clear.d [%0+]\n10:\n" CLEAR,	\
		"11:	addq 4,%1\n" FIXUP,		\
		"	.dword 10b,11b\n" TENTRY)

#define __asm_clear_20(to, ret) \
	__asm_clear_20x_cont(to, ret, "", "", "")

#define __asm_clear_24x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_20x_cont(to, ret,			\
		"	clear.d [%0+]\n12:\n" CLEAR,	\
		"13:	addq 4,%1\n" FIXUP,		\
		"	.dword 12b,13b\n" TENTRY)

#define __asm_clear_24(to, ret) \
	__asm_clear_24x_cont(to, ret, "", "", "")
/* Note that these expand awfully if made into switch constructs, so
   don't do that.  */
extern inline unsigned long
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
	unsigned long ret = 0;

	if (n == 0)
		;
	else if (n == 1)
		__asm_copy_from_user_1(to, from, ret);
	else if (n == 2)
		__asm_copy_from_user_2(to, from, ret);
	else if (n == 3)
		__asm_copy_from_user_3(to, from, ret);
	else if (n == 4)
		__asm_copy_from_user_4(to, from, ret);
	else if (n == 5)
		__asm_copy_from_user_5(to, from, ret);
	else if (n == 6)
		__asm_copy_from_user_6(to, from, ret);
	else if (n == 7)
		__asm_copy_from_user_7(to, from, ret);
	else if (n == 8)
		__asm_copy_from_user_8(to, from, ret);
	else if (n == 9)
		__asm_copy_from_user_9(to, from, ret);
	else if (n == 10)
		__asm_copy_from_user_10(to, from, ret);
	else if (n == 11)
		__asm_copy_from_user_11(to, from, ret);
	else if (n == 12)
		__asm_copy_from_user_12(to, from, ret);
	else if (n == 13)
		__asm_copy_from_user_13(to, from, ret);
	else if (n == 14)
		__asm_copy_from_user_14(to, from, ret);
	else if (n == 15)
		__asm_copy_from_user_15(to, from, ret);
	else if (n == 16)
		__asm_copy_from_user_16(to, from, ret);
	else if (n == 20)
		__asm_copy_from_user_20(to, from, ret);
	else if (n == 24)
		__asm_copy_from_user_24(to, from, ret);
	else
		ret = __generic_copy_from_user(to, from, n);

	return ret;
}
/* Ditto, don't make a switch out of this.  */
extern inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
{
	unsigned long ret = 0;

	if (n == 0)
		;
	else if (n == 1)
		__asm_copy_to_user_1(to, from, ret);
	else if (n == 2)
		__asm_copy_to_user_2(to, from, ret);
	else if (n == 3)
		__asm_copy_to_user_3(to, from, ret);
	else if (n == 4)
		__asm_copy_to_user_4(to, from, ret);
	else if (n == 5)
		__asm_copy_to_user_5(to, from, ret);
	else if (n == 6)
		__asm_copy_to_user_6(to, from, ret);
	else if (n == 7)
		__asm_copy_to_user_7(to, from, ret);
	else if (n == 8)
		__asm_copy_to_user_8(to, from, ret);
	else if (n == 9)
		__asm_copy_to_user_9(to, from, ret);
	else if (n == 10)
		__asm_copy_to_user_10(to, from, ret);
	else if (n == 11)
		__asm_copy_to_user_11(to, from, ret);
	else if (n == 12)
		__asm_copy_to_user_12(to, from, ret);
	else if (n == 13)
		__asm_copy_to_user_13(to, from, ret);
	else if (n == 14)
		__asm_copy_to_user_14(to, from, ret);
	else if (n == 15)
		__asm_copy_to_user_15(to, from, ret);
	else if (n == 16)
		__asm_copy_to_user_16(to, from, ret);
	else if (n == 20)
		__asm_copy_to_user_20(to, from, ret);
	else if (n == 24)
		__asm_copy_to_user_24(to, from, ret);
	else
		ret = __generic_copy_to_user(to, from, n);

	return ret;
}
/* No switch, please.  */
extern inline unsigned long
__constant_clear_user(void *to, unsigned long n)
{
	unsigned long ret = 0;

	if (n == 0)
		;
	else if (n == 1)
		__asm_clear_1(to, ret);
	else if (n == 2)
		__asm_clear_2(to, ret);
	else if (n == 3)
		__asm_clear_3(to, ret);
	else if (n == 4)
		__asm_clear_4(to, ret);
	else if (n == 8)
		__asm_clear_8(to, ret);
	else if (n == 12)
		__asm_clear_12(to, ret);
	else if (n == 16)
		__asm_clear_16(to, ret);
	else if (n == 20)
		__asm_clear_20(to, ret);
	else if (n == 24)
		__asm_clear_24(to, ret);
	else
		ret = __generic_clear_user(to, n);

	return ret;
}
#define clear_user(to, n)		\
	(__builtin_constant_p(n) ?	\
	 __constant_clear_user(to, n) :	\
	 __generic_clear_user(to, n))
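/*
 * Sketch (ubuf is an assumed user-space pointer): with a constant length
 * this picks the inlined __asm_clear_16; a nonzero return is the number
 * of bytes that could not be cleared.
 *
 *	if (clear_user(ubuf, 16))
 *		return -EFAULT;
 */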
#define copy_from_user(to, from, n)		\
	(__builtin_constant_p(n) ?		\
	 __constant_copy_from_user(to, from, n) :	\
	 __generic_copy_from_user(to, from, n))

#define copy_to_user(to, from, n)		\
	(__builtin_constant_p(n) ?		\
	 __constant_copy_to_user(to, from, n) :	\
	 __generic_copy_to_user(to, from, n))
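/*
 * Sketch (uarg is an assumed user-space pointer to a struct foo): the
 * constant sizeof picks the inlined variant, and the return value is the
 * number of bytes left uncopied, so 0 means success:
 *
 *	struct foo kbuf;
 *	if (copy_from_user(&kbuf, uarg, sizeof(kbuf)))
 *		return -EFAULT;
 */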
#define copy_to_user_ret(to,from,n,retval) \
	do { if (copy_to_user(to,from,n)) return retval; } while (0)

#define copy_from_user_ret(to,from,n,retval) \
	do { if (copy_from_user(to,from,n)) return retval; } while (0)
/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */
extern inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	return __copy_user_zeroing(to,from,n);
}

extern inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	return __copy_user(to,from,n);
}

extern inline unsigned long
__generic_clear_user_nocheck(void *to, unsigned long n)
{
	return __do_clear_user(to,n);
}
/* without checking */

#define __copy_to_user(to,from,n)	__generic_copy_to_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n)	__generic_copy_from_user_nocheck((to),(from),(n))
#define __clear_user(to,n)		__generic_clear_user_nocheck((to),(n))
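/*
 * These pair with one explicit access_ok() over the whole area when the
 * same user buffer is touched repeatedly (sketch; names assumed):
 *
 *	if (!access_ok(VERIFY_WRITE, uvec, 2 * sizeof(struct foo)))
 *		return -EFAULT;
 *	__copy_to_user(&uvec[0], &a, sizeof(struct foo));
 *	__copy_to_user(&uvec[1], &b, sizeof(struct foo));
 */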
/*
 * Return the size of a string (including the ending 0)
 *
 * Return length of string in userspace including terminating 0
 * or 0 for error.  Return a value greater than N if too long.
 */
extern inline long
strnlen_user(const char *s, long n)
{
	long res, tmp1;

	if (!access_ok(VERIFY_READ, s, 0))
		return 0;

	/*
	 * This code is deduced from:
	 *
	 *	tmp1 = n;
	 *	while (tmp1-- > 0 && *s++)
	 *	  ;
	 *
	 *	res = n - tmp1;
	 *
	 * (with tweaks for the fault cases)
	 */
	__asm__ __volatile__ (
		"	move.d %2,%0\n"
		"	move.b [%1+],$r9\n"
		"1:	beq 3f\n"
		"	subq 1,%0\n"

		"	bne 1b\n"
		"	move.b [%1+],$r9\n"

		"2:	subq 1,%0\n"
		"3:	sub.d %2,%0\n"
		"	neg.d %0,%0\n"
		"4:\n"
		"	.section .fixup,\"ax\"\n"
		"5:	clear.d %0\n"
		"	jump 4b\n"

		/* There's one address for a fault at the first move, and
		   two possible PC values for a fault at the second move,
		   being a delay-slot filler.  However, the branch-target
		   for the second move is the same as the first address.
		   Just so you don't get confused...  */
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.dword 1b,5b\n"
		"	.dword 2b,5b\n"
		"	.previous"
		: "=r" (res), "=r" (tmp1)
		: "r" (n), "1" (s)
		: "r9");

	return res;
}

#define strlen_user(str)	strnlen_user((str), 0x7ffffffe)
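/*
 * Sketch (ustr is an assumed user-space string pointer): the result
 * counts the terminating 0, and 0 itself signals a fault:
 *
 *	long len = strnlen_user(ustr, PAGE_SIZE);
 *	if (len == 0)
 *		return -EFAULT;
 */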
#endif	/* __ASSEMBLY__ */

#endif	/* _CRIS_UACCESS_H */