2 * Authors: Bjorn Wesen (bjornw@axis.com)
3 * Hans-Peter Nilsson (hp@axis.com)
6 * Revision 1.12 2003/06/17 14:00:42 starvik
7 * Merge of Linux 2.4.21
9 * Revision 1.11 2003/06/04 19:36:45 hp
10 * Remove unused copy-pasted register clobber from __asm_clear
12 * Revision 1.10 2003/04/09 08:22:38 pkj
13 * Typo correction (taken from Linux 2.5).
15 * Revision 1.9 2002/11/20 18:20:17 hp
16 * Make all static inline functions extern inline.
18 * Revision 1.8 2001/10/29 13:01:48 bjornw
19 * Removed unused variable tmp2 in strnlen_user
21 * Revision 1.7 2001/10/02 12:44:52 hp
22 * Add support for 64-bit put_user/get_user
24 * Revision 1.6 2001/10/01 14:51:17 bjornw
25 * Added register prefixes and removed underscores
27 * Revision 1.5 2000/10/25 03:33:21 hp
28 * - Provide implementation for everything else but get_user and put_user;
29 * copying inline to/from user for constant length 0..16, 20, 24, and
30 * clearing for 0..4, 8, 12, 16, 20, 24, strncpy_from_user and strnlen_user
32 * - Constraints for destination addr in get_user cannot be memory, only reg.
33 * - Correct labels for PC at expected fault points.
34 * - Nits with assembly code.
35 * - Don't use statement expressions without value; use "do {} while (0)".
36 * - Return correct values from __generic_... functions.
38 * Revision 1.4 2000/09/12 16:28:25 bjornw
39 * * Removed comments from the get/put user asm code
40 * * Constrains for destination addr in put_user cannot be memory, only reg
42 * Revision 1.3 2000/09/12 14:30:20 bjornw
43 * MAX_ADDR_USER does not exist anymore
45 * Revision 1.2 2000/07/13 15:52:48 bjornw
46 * New user-access functions
48 * Revision 1.1.1.1 2000/07/10 16:32:31 bjornw
49 * CRIS architecture, working draft
/* The asm statements have been tweaked (within the domain of correctness)
   to give satisfactory results for "gcc version 2.96 20000427 (experimental)".
60 Register $r9 is chosen for temporaries, being a call-clobbered register
61 first in line to be used (notably for local blocks), not colliding with
62 parameter registers. */
64 #ifndef _CRIS_UACCESS_H
65 #define _CRIS_UACCESS_H
68 #include <linux/sched.h>
69 #include <linux/errno.h>
70 #include <asm/processor.h>
#define VERIFY_WRITE 1
/* NOTE(review): VERIFY_READ is used below (access_ok(VERIFY_READ,...))
   but its #define is not visible in this excerpt -- confirm it is
   defined nearby (conventionally 0). */

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

/* Build an mm_segment_t from a raw limit value. */
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })

/* addr_limit is the maximum accessible address for the task. we misuse
 * the KERNEL_DS and USER_DS values to both assign and compare the
 * addr_limit values through the equally misnamed get/set_fs macros.
 */

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->addr_limit)
#define set_fs(x)	(current->addr_limit = (x))

/* Two segments compare equal iff their limit values are equal. */
#define segment_eq(a,b)	((a).seg == (b).seg)

#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
/* Written as two comparisons so (addr)+(size) cannot wrap around. */
#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
/* 'type' is not used by __access_ok: kernel and user share one address
   space on CRIS, so only the range check matters. */
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
106 extern inline int verify_area(int type, const void * addr, unsigned long size)
108 return access_ok(type,addr,size) ? 0 : -EFAULT;
113 * The exception table consists of pairs of addresses: the first is the
114 * address of an instruction that is allowed to fault, and the second is
115 * the address at which the program should continue. No registers are
116 * modified, so it is entirely up to the continuation code to figure out
119 * All the routines below use bits of fixup code that are out of line
120 * with the main instruction path. This means when everything is well,
121 * we don't even have to jump over them. Further, they do not intrude
122 * on our cache or tlb entries.
/*
 * One exception-table entry: 'insn' is the address of an instruction
 * that is allowed to fault, 'fixup' is where execution continues after
 * the fault (see the explanatory comment above).
 */
struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
135 * These are the main single-value transfer routines. They automatically
136 * use the right size if we just have the right pointer type.
138 * This gets kind of ugly. We want to return _two_ values in "get_user()"
139 * and yet we don't want to do any pointers, because that is too much
140 * of a performance impact. Thus we have a few rather ugly macros here,
141 * and hide all the ugliness from the user.
143 * The "__xxx" versions of the user access functions are versions that
144 * do not verify the address space, that must have been done previously
145 * with a separate "access_ok()" call (this is used when we do multiple
146 * accesses to the same area of user memory).
148 * As we use the same address space for kernel and user data on
149 * CRIS, we can just do these as direct assignments. (Of course, the
150 * exception handling means that it's no longer "just"...)
/*
 * Checked single-value transfer: validates the address with access_ok()
 * and transfers sizeof(*(ptr)) bytes; evaluates to 0 or -EFAULT.
 */
#define get_user(x,ptr) \
	__get_user_check((x),(ptr),sizeof(*(ptr)))
#define put_user(x,ptr) \
	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

/* Unchecked variants: the caller must have done access_ok() already. */
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
/* Declared but (presumably) never defined: referencing it for an
   unsupported size fails at link time. */
extern long __put_user_bad(void);

/* Store x at user address ptr; __pu_err collects 0/-EFAULT.
   NOTE(review): the do{...}while(0) wrappers and the switch(size) head
   of these macros are not visible in this excerpt. */
#define __put_user_nocheck(x,ptr,size) \
	__put_user_size((x),(ptr),(size),__pu_err); \
#define __put_user_check(x,ptr,size) \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) *__pu_addr = (ptr); \
	if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
		__put_user_size((x),__pu_addr,(size),__pu_err); \
#define __put_user_size(x,ptr,size,retval) \
	case 1: __put_user_asm(x,ptr,retval,"move.b"); break; \
	case 2: __put_user_asm(x,ptr,retval,"move.w"); break; \
	case 4: __put_user_asm(x,ptr,retval,"move.d"); break; \
	case 8: __put_user_asm_64(x,ptr,retval); break; \
	default: __put_user_bad(); \
struct __large_struct { unsigned long buf[100]; };
/* __m(x): view *x as a big dummy struct so the asm appears to touch
   memory there without aliasing any named object. */
#define __m(x) (*(struct __large_struct *)(x))
196 * We don't tell gcc that we are accessing memory, but this is OK
197 * because we do not write to any memory gcc knows about, so there
198 * are no aliasing issues.
200 * Note that PC at a fault is the address *after* the faulting
/* Store one 1/2/4-byte value ('op' selects width) with a fixup: on a
   fault the handler enters label 3, which moves -EFAULT (%3) into the
   error operand (%0). */
#define __put_user_asm(x, addr, err, op) \
	__asm__ __volatile__( \
		"	.section .fixup,\"ax\"\n" \
		"3:	move.d %3,%0\n" \
		"	.section __ex_table,\"a\"\n" \
		: "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))

/* 64-bit store: low word (%M1) at addr, high word (%H1) at addr+4,
   with the same -EFAULT fixup. */
#define __put_user_asm_64(x, addr, err) \
	__asm__ __volatile__( \
		"	move.d %M1,[%2]\n" \
		"2:	move.d %H1,[%2+4]\n" \
		"	.section .fixup,\"ax\"\n" \
		"3:	move.d %3,%0\n" \
		"	.section __ex_table,\"a\"\n" \
		: "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
/* Fetch a value of 'size' bytes from user ptr into x.  The unchecked
   variant skips access_ok(); the checked variant leaves x = 0 and
   error = -EFAULT when the range check fails. */
#define __get_user_nocheck(x,ptr,size) \
	long __gu_err, __gu_val; \
	__get_user_size(__gu_val,(ptr),(size),__gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
#define __get_user_check(x,ptr,size) \
	long __gu_err = -EFAULT, __gu_val = 0; \
	const __typeof__(*(ptr)) *__gu_addr = (ptr); \
	if (access_ok(VERIFY_READ,__gu_addr,size)) \
		__get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val; \
extern long __get_user_bad(void);

/* Dispatch on transfer size; unsupported sizes reference the undefined
   __get_user_bad() (presumably caught at link time). */
#define __get_user_size(x,ptr,size,retval) \
	case 1: __get_user_asm(x,ptr,retval,"move.b"); break; \
	case 2: __get_user_asm(x,ptr,retval,"move.w"); break; \
	case 4: __get_user_asm(x,ptr,retval,"move.d"); break; \
	case 8: __get_user_asm_64(x,ptr,retval); break; \
	default: (x) = __get_user_bad(); \
/* See comment before __put_user_asm. */

/* Load one 1/2/4-byte value ('op' selects width); on a fault, fixup
   label 3 moves -EFAULT (%3) into the error operand (%0). */
#define __get_user_asm(x, addr, err, op) \
	__asm__ __volatile__( \
		"	.section .fixup,\"ax\"\n" \
		"3:	move.d %3,%0\n" \
		"	.section __ex_table,\"a\"\n" \
		: "=r" (err), "=r" (x) \
		: "r" (addr), "g" (-EFAULT), "0" (err))

/* 64-bit load: low word [%2] into %M1, high word [%2+4] into %H1. */
#define __get_user_asm_64(x, addr, err) \
	__asm__ __volatile__( \
		"	move.d [%2],%M1\n" \
		"2:	move.d [%2+4],%H1\n" \
		"	.section .fixup,\"ax\"\n" \
		"3:	move.d %3,%0\n" \
		"	.section __ex_table,\"a\"\n" \
		: "=r" (err), "=r" (x) \
		: "r" (addr), "g" (-EFAULT), "0" (err))
/* More complex functions. Most are inline, but some call functions that
   live in lib/usercopy.c */

/* Raw copy/clear loops defined in lib/usercopy.c.  NOTE(review): the
   callers below treat the return value as the number of bytes NOT
   transferred -- confirm against lib/usercopy.c. */
extern unsigned long __copy_user(void *to, const void *from, unsigned long n);
extern unsigned long __copy_user_zeroing(void *to, const void *from, unsigned long n);
extern unsigned long __do_clear_user(void *to, unsigned long n);
/*
 * Copy a null terminated string from userspace.
 *
 * Returns:
 *	-EFAULT for an exception
 *	count if we hit the buffer limit
 *	bytes copied if we hit a null byte
 *	(without the null byte)
 */
__do_strncpy_from_user(char *dst, const char *src, long count)
	/*
	 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
	 *
	 * This code is deduced from:
	 *	while ((*dst++ = (tmp2 = *src++)) != 0
	 *	res = count - tmp1;
	 */
	__asm__ __volatile__ (
		"	move.b [%2+],$r9\n"
		"	move.b $r9,[%1+]\n"
		"	move.b [%2+],$r9\n"
		"	.section .fixup,\"ax\"\n"

		/* There's one address for a fault at the first move, and
		   two possible PC values for a fault at the second move,
		   being a delay-slot filler. However, the branch-target
		   for the second move is the same as the first address.
		   Just so you don't get confused... */
		"	.section __ex_table,\"a\"\n"
		: "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
		: "3" (count), "1" (dst), "2" (src), "g" (-EFAULT)
377 extern inline unsigned long
378 __generic_copy_to_user(void *to, const void *from, unsigned long n)
380 if (access_ok(VERIFY_WRITE, to, n))
381 return __copy_user(to,from,n);
385 extern inline unsigned long
386 __generic_copy_from_user(void *to, const void *from, unsigned long n)
388 if (access_ok(VERIFY_READ, from, n))
389 return __copy_user_zeroing(to,from,n);
393 extern inline unsigned long
394 __generic_clear_user(void *to, unsigned long n)
396 if (access_ok(VERIFY_WRITE, to, n))
397 return __do_clear_user(to,n);
/*
 * Unchecked strncpy from user space; see __do_strncpy_from_user above
 * for the return-value contract.
 */
extern inline long
__strncpy_from_user(char *dst, const char *src, long count)
{
	return __do_strncpy_from_user(dst, src, count);
}
408 strncpy_from_user(char *dst, const char *src, long count)
411 if (access_ok(VERIFY_READ, src, 1))
412 res = __do_strncpy_from_user(dst, src, count);
/* A few copy asms to build up the more complex ones from.

   Note again, a post-increment is performed regardless of whether a bus
   fault occurred in that instruction, and PC for a faulted insn is the
   address *after* the insn. */

/* Common frame for the fixed-size copies: emits the COPY body, a
   .fixup section holding FIXUP and an __ex_table section holding
   TENTRY.  to/from/ret are read-write register operands (inputs "0",
   "1", "2" alias the outputs). */
#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm__ __volatile__ ( \
		"	.section .fixup,\"ax\"\n" \
		"	.section __ex_table,\"a\"\n" \
		: "=r" (to), "=r" (from), "=r" (ret) \
		: "0" (to), "1" (from), "2" (ret) \
/* Fixed-size user->kernel copies.  Each step reads into $r9 then
   stores; the numbered label on the store is the fault point, and the
   fixup clears the remaining destination bytes.  The *x_cont macros
   chain onto the smaller sizes by appending COPY/FIXUP/TENTRY
   fragments.  NOTE(review): several fixup/.dword continuation lines
   appear to be missing from this excerpt. */
#define __asm_copy_from_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"2:	move.b $r9,[%0+]\n", \
		"	clear.b [%0+]\n", \
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		"	move.w [%1+],$r9\n" \
		"2:	move.w $r9,[%0+]\n" COPY, \
		"	clear.w [%0+]\n" FIXUP, \
		"	.dword 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_3(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"4:	move.b $r9,[%0+]\n", \
		"	clear.b [%0+]\n", \
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"2:	move.d $r9,[%0+]\n" COPY, \
		"	clear.d [%0+]\n" FIXUP, \
		"	.dword 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_5(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"4:	move.b $r9,[%0+]\n", \
		"	clear.b [%0+]\n", \
#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_4x_cont(to, from, ret, \
		"	move.w [%1+],$r9\n" \
		"4:	move.w $r9,[%0+]\n" COPY, \
		"	clear.w [%0+]\n" FIXUP, \
		"	.dword 4b,5b\n" TENTRY)

#define __asm_copy_from_user_6(to, from, ret) \
	__asm_copy_from_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_7(to, from, ret) \
	__asm_copy_from_user_6x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"6:	move.b $r9,[%0+]\n", \
		"	clear.b [%0+]\n", \
#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_4x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"4:	move.d $r9,[%0+]\n" COPY, \
		"	clear.d [%0+]\n" FIXUP, \
		"	.dword 4b,5b\n" TENTRY)

#define __asm_copy_from_user_8(to, from, ret) \
	__asm_copy_from_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_9(to, from, ret) \
	__asm_copy_from_user_8x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"6:	move.b $r9,[%0+]\n", \
		"	clear.b [%0+]\n", \
#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_8x_cont(to, from, ret, \
		"	move.w [%1+],$r9\n" \
		"6:	move.w $r9,[%0+]\n" COPY, \
		"	clear.w [%0+]\n" FIXUP, \
		"	.dword 6b,7b\n" TENTRY)

#define __asm_copy_from_user_10(to, from, ret) \
	__asm_copy_from_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_11(to, from, ret) \
	__asm_copy_from_user_10x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"8:	move.b $r9,[%0+]\n", \
		"	clear.b [%0+]\n", \
#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_8x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"6:	move.d $r9,[%0+]\n" COPY, \
		"	clear.d [%0+]\n" FIXUP, \
		"	.dword 6b,7b\n" TENTRY)

#define __asm_copy_from_user_12(to, from, ret) \
	__asm_copy_from_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_13(to, from, ret) \
	__asm_copy_from_user_12x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"8:	move.b $r9,[%0+]\n", \
		"	clear.b [%0+]\n", \
#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_12x_cont(to, from, ret, \
		"	move.w [%1+],$r9\n" \
		"8:	move.w $r9,[%0+]\n" COPY, \
		"	clear.w [%0+]\n" FIXUP, \
		"	.dword 8b,9b\n" TENTRY)

#define __asm_copy_from_user_14(to, from, ret) \
	__asm_copy_from_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_15(to, from, ret) \
	__asm_copy_from_user_14x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"10:	move.b $r9,[%0+]\n", \
		"	clear.b [%0+]\n", \
#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_12x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"8:	move.d $r9,[%0+]\n" COPY, \
		"	clear.d [%0+]\n" FIXUP, \
		"	.dword 8b,9b\n" TENTRY)

#define __asm_copy_from_user_16(to, from, ret) \
	__asm_copy_from_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_16x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"10:	move.d $r9,[%0+]\n" COPY, \
		"	clear.d [%0+]\n" FIXUP, \
		"	.dword 10b,11b\n" TENTRY)

#define __asm_copy_from_user_20(to, from, ret) \
	__asm_copy_from_user_20x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_20x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"12:	move.d $r9,[%0+]\n" COPY, \
		"	clear.d [%0+]\n" FIXUP, \
		"	.dword 12b,13b\n" TENTRY)

#define __asm_copy_from_user_24(to, from, ret) \
	__asm_copy_from_user_24x_cont(to, from, ret, "", "", "")
/* And now, the to-user ones. */

/* Fixed-size kernel->user copies.  The fault label sits *after* the
   store (PC at a fault is the address after the insn), and the fixup
   adds the step size to ret (%2) to account the bytes not written --
   no destination clearing on this direction. */
#define __asm_copy_to_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n2:\n", \
#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		"	move.w [%1+],$r9\n" \
		"	move.w $r9,[%0+]\n2:\n" COPY, \
		"3:	addq 2,%2\n" FIXUP, \
		"	.dword 2b,3b\n" TENTRY)

#define __asm_copy_to_user_2(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_3(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n4:\n", \
#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"	move.d $r9,[%0+]\n2:\n" COPY, \
		"3:	addq 4,%2\n" FIXUP, \
		"	.dword 2b,3b\n" TENTRY)

#define __asm_copy_to_user_4(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n4:\n", \
#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		"	move.w [%1+],$r9\n" \
		"	move.w $r9,[%0+]\n4:\n" COPY, \
		"5:	addq 2,%2\n" FIXUP, \
		"	.dword 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n6:\n", \
#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"	move.d $r9,[%0+]\n4:\n" COPY, \
		"5:	addq 4,%2\n" FIXUP, \
		"	.dword 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_9(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n6:\n", \
#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		"	move.w [%1+],$r9\n" \
		"	move.w $r9,[%0+]\n6:\n" COPY, \
		"7:	addq 2,%2\n" FIXUP, \
		"	.dword 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n8:\n", \
#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"	move.d $r9,[%0+]\n6:\n" COPY, \
		"7:	addq 4,%2\n" FIXUP, \
		"	.dword 6b,7b\n" TENTRY)

#define __asm_copy_to_user_12(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n8:\n", \
#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		"	move.w [%1+],$r9\n" \
		"	move.w $r9,[%0+]\n8:\n" COPY, \
		"9:	addq 2,%2\n" FIXUP, \
		"	.dword 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, \
		"	move.b [%1+],$r9\n" \
		"	move.b $r9,[%0+]\n10:\n", \
#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"	move.d $r9,[%0+]\n8:\n" COPY, \
		"9:	addq 4,%2\n" FIXUP, \
		"	.dword 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
	__asm_copy_to_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_16x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"	move.d $r9,[%0+]\n10:\n" COPY, \
		"11:	addq 4,%2\n" FIXUP, \
		"	.dword 10b,11b\n" TENTRY)

#define __asm_copy_to_user_20(to, from, ret) \
	__asm_copy_to_user_20x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_20x_cont(to, from, ret, \
		"	move.d [%1+],$r9\n" \
		"	move.d $r9,[%0+]\n12:\n" COPY, \
		"13:	addq 4,%2\n" FIXUP, \
		"	.dword 12b,13b\n" TENTRY)

#define __asm_copy_to_user_24(to, from, ret) \
	__asm_copy_to_user_24x_cont(to, from, ret, "", "", "")
/* Define a few clearing asms with exception handlers. */

/* This frame-asm is like the __asm_copy_user_cont one, but has one less
   input operand. */

#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm__ __volatile__ ( \
		"	.section .fixup,\"ax\"\n" \
		"	.section __ex_table,\"a\"\n" \
		: "=r" (to), "=r" (ret) \
		: "0" (to), "1" (ret) \
/* Fixed-size clears of user memory, built like the copy macros above:
   the fault label follows the clear, and the fixup adds the step size
   to ret (%1). */
#define __asm_clear_1(to, ret) \
	__asm_clear(to, ret, \
		"	clear.b [%0+]\n2:\n", \
#define __asm_clear_2(to, ret) \
	__asm_clear(to, ret, \
		"	clear.w [%0+]\n2:\n", \
#define __asm_clear_3(to, ret) \
	__asm_clear(to, ret, \
		"2:	clear.b [%0+]\n3:\n", \
#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear(to, ret, \
		"	clear.d [%0+]\n2:\n" CLEAR, \
		"3:	addq 4,%1\n" FIXUP, \
		"	.dword 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
	__asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_4x_cont(to, ret, \
		"	clear.d [%0+]\n4:\n" CLEAR, \
		"5:	addq 4,%1\n" FIXUP, \
		"	.dword 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
	__asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_8x_cont(to, ret, \
		"	clear.d [%0+]\n6:\n" CLEAR, \
		"7:	addq 4,%1\n" FIXUP, \
		"	.dword 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
	__asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_12x_cont(to, ret, \
		"	clear.d [%0+]\n8:\n" CLEAR, \
		"9:	addq 4,%1\n" FIXUP, \
		"	.dword 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
	__asm_clear_16x_cont(to, ret, "", "", "")

#define __asm_clear_20x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_16x_cont(to, ret, \
		"	clear.d [%0+]\n10:\n" CLEAR, \
		"11:	addq 4,%1\n" FIXUP, \
		"	.dword 10b,11b\n" TENTRY)

#define __asm_clear_20(to, ret) \
	__asm_clear_20x_cont(to, ret, "", "", "")

#define __asm_clear_24x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_20x_cont(to, ret, \
		"	clear.d [%0+]\n12:\n" CLEAR, \
		"13:	addq 4,%1\n" FIXUP, \
		"	.dword 12b,13b\n" TENTRY)

#define __asm_clear_24(to, ret) \
	__asm_clear_24x_cont(to, ret, "", "", "")
/* Note that these expand awfully if made into switch constructs, so
   don't do that. */
/*
 * Copy a compile-time-constant number of bytes from user space.
 * Sizes 1..16, 20 and 24 are open-coded; anything else falls back to
 * __generic_copy_from_user().  Returns the number of bytes not copied.
 * Deliberately an if/else chain, not a switch (see note above).
 */
extern inline unsigned long
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
	unsigned long ret = 0;

	if (n == 1)
		__asm_copy_from_user_1(to, from, ret);
	else if (n == 2)
		__asm_copy_from_user_2(to, from, ret);
	else if (n == 3)
		__asm_copy_from_user_3(to, from, ret);
	else if (n == 4)
		__asm_copy_from_user_4(to, from, ret);
	else if (n == 5)
		__asm_copy_from_user_5(to, from, ret);
	else if (n == 6)
		__asm_copy_from_user_6(to, from, ret);
	else if (n == 7)
		__asm_copy_from_user_7(to, from, ret);
	else if (n == 8)
		__asm_copy_from_user_8(to, from, ret);
	else if (n == 9)
		__asm_copy_from_user_9(to, from, ret);
	else if (n == 10)
		__asm_copy_from_user_10(to, from, ret);
	else if (n == 11)
		__asm_copy_from_user_11(to, from, ret);
	else if (n == 12)
		__asm_copy_from_user_12(to, from, ret);
	else if (n == 13)
		__asm_copy_from_user_13(to, from, ret);
	else if (n == 14)
		__asm_copy_from_user_14(to, from, ret);
	else if (n == 15)
		__asm_copy_from_user_15(to, from, ret);
	else if (n == 16)
		__asm_copy_from_user_16(to, from, ret);
	else if (n == 20)
		__asm_copy_from_user_20(to, from, ret);
	else if (n == 24)
		__asm_copy_from_user_24(to, from, ret);
	else
		ret = __generic_copy_from_user(to, from, n);

	return ret;
}
915 /* Ditto, don't make a switch out of this. */
/*
 * Copy a compile-time-constant number of bytes to user space.
 * Sizes 1..16, 20 and 24 are open-coded; anything else falls back to
 * __generic_copy_to_user().  Returns the number of bytes not copied.
 * Deliberately an if/else chain, not a switch.
 */
extern inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
{
	unsigned long ret = 0;

	if (n == 1)
		__asm_copy_to_user_1(to, from, ret);
	else if (n == 2)
		__asm_copy_to_user_2(to, from, ret);
	else if (n == 3)
		__asm_copy_to_user_3(to, from, ret);
	else if (n == 4)
		__asm_copy_to_user_4(to, from, ret);
	else if (n == 5)
		__asm_copy_to_user_5(to, from, ret);
	else if (n == 6)
		__asm_copy_to_user_6(to, from, ret);
	else if (n == 7)
		__asm_copy_to_user_7(to, from, ret);
	else if (n == 8)
		__asm_copy_to_user_8(to, from, ret);
	else if (n == 9)
		__asm_copy_to_user_9(to, from, ret);
	else if (n == 10)
		__asm_copy_to_user_10(to, from, ret);
	else if (n == 11)
		__asm_copy_to_user_11(to, from, ret);
	else if (n == 12)
		__asm_copy_to_user_12(to, from, ret);
	else if (n == 13)
		__asm_copy_to_user_13(to, from, ret);
	else if (n == 14)
		__asm_copy_to_user_14(to, from, ret);
	else if (n == 15)
		__asm_copy_to_user_15(to, from, ret);
	else if (n == 16)
		__asm_copy_to_user_16(to, from, ret);
	else if (n == 20)
		__asm_copy_to_user_20(to, from, ret);
	else if (n == 24)
		__asm_copy_to_user_24(to, from, ret);
	else
		ret = __generic_copy_to_user(to, from, n);

	return ret;
}
965 /* No switch, please. */
/*
 * Clear a compile-time-constant number of user-space bytes.
 * Sizes 1..4, 8, 12, 16, 20 and 24 are open-coded; anything else falls
 * back to __generic_clear_user().  Returns bytes not cleared.
 * Deliberately an if/else chain, not a switch.
 */
extern inline unsigned long
__constant_clear_user(void *to, unsigned long n)
{
	unsigned long ret = 0;

	if (n == 1)
		__asm_clear_1(to, ret);
	else if (n == 2)
		__asm_clear_2(to, ret);
	else if (n == 3)
		__asm_clear_3(to, ret);
	else if (n == 4)
		__asm_clear_4(to, ret);
	else if (n == 8)
		__asm_clear_8(to, ret);
	else if (n == 12)
		__asm_clear_12(to, ret);
	else if (n == 16)
		__asm_clear_16(to, ret);
	else if (n == 20)
		__asm_clear_20(to, ret);
	else if (n == 24)
		__asm_clear_24(to, ret);
	else
		ret = __generic_clear_user(to, n);

	return ret;
}
/* Public entry points: when n is a compile-time constant, dispatch to
   the open-coded constant-size versions, otherwise to the generic
   (out-of-line) ones.  All perform the access_ok() check. */
#define clear_user(to, n) \
	(__builtin_constant_p(n) ? \
	 __constant_clear_user(to, n) : \
	 __generic_clear_user(to, n))

#define copy_from_user(to, from, n) \
	(__builtin_constant_p(n) ? \
	 __constant_copy_from_user(to, from, n) : \
	 __generic_copy_from_user(to, from, n))

#define copy_to_user(to, from, n) \
	(__builtin_constant_p(n) ? \
	 __constant_copy_to_user(to, from, n) : \
	 __generic_copy_to_user(to, from, n))
/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */

/* Unchecked copy user->kernel; the caller must have done access_ok().
   Returns the number of bytes not copied. */
extern inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	return __copy_user_zeroing(to,from,n);
}
/* Unchecked copy kernel->user; the caller must have done access_ok().
   Returns the number of bytes not copied. */
extern inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	return __copy_user(to,from,n);
}
/* Unchecked clear of user memory; the caller must have done
   access_ok().  Returns the number of bytes not cleared. */
extern inline unsigned long
__generic_clear_user_nocheck(void *to, unsigned long n)
{
	return __do_clear_user(to,n);
}
/* without checking */

/* Double-underscore variants: same as above but skip access_ok(). */
#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
#define __clear_user(to,n) __generic_clear_user_nocheck((to),(n))
/*
 * Return the size of a string (including the ending 0)
 *
 * Return length of string in userspace including terminating 0
 * or 0 for error. Return a value greater than N if too long.
 */
strnlen_user(const char *s, long n)
	if (!access_ok(VERIFY_READ, s, 0))
	/*
	 * This code is deduced from:
	 *	while (tmp1-- > 0 && *s++)
	 */
	__asm__ __volatile__ (
		"	.section .fixup,\"ax\"\n"

		/* There's one address for a fault at the first move, and
		   two possible PC values for a fault at the second move,
		   being a delay-slot filler. However, the branch-target
		   for the second move is the same as the first address.
		   Just so you don't get confused... */
		"	.section __ex_table,\"a\"\n"
		: "=r" (res), "=r" (tmp1)
/* String length including the terminator, bounded by a limit just
   under LONG_MAX so the "too long" return is representable. */
#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
1105 #endif /* __ASSEMBLY__ */
1107 #endif /* _CRIS_UACCESS_H */