import of upstream 2.4.34.4 from kernel.org
[linux-2.4.git] / include / asm-cris / uaccess.h
1 /* 
2  * Authors:    Bjorn Wesen (bjornw@axis.com)
3  *             Hans-Peter Nilsson (hp@axis.com)
4  *
5  * $Log: uaccess.h,v $
6  * Revision 1.12  2003/06/17 14:00:42  starvik
7  * Merge of Linux 2.4.21
8  *
9  * Revision 1.11  2003/06/04 19:36:45  hp
10  * Remove unused copy-pasted register clobber from __asm_clear
11  *
12  * Revision 1.10  2003/04/09 08:22:38  pkj
13  * Typo correction (taken from Linux 2.5).
14  *
15  * Revision 1.9  2002/11/20 18:20:17  hp
16  * Make all static inline functions extern inline.
17  *
18  * Revision 1.8  2001/10/29 13:01:48  bjornw
19  * Removed unused variable tmp2 in strnlen_user
20  *
21  * Revision 1.7  2001/10/02 12:44:52  hp
22  * Add support for 64-bit put_user/get_user
23  *
24  * Revision 1.6  2001/10/01 14:51:17  bjornw
25  * Added register prefixes and removed underscores
26  *
27  * Revision 1.5  2000/10/25 03:33:21  hp
28  * - Provide implementation for everything else but get_user and put_user;
29  *   copying inline to/from user for constant length 0..16, 20, 24, and
30  *   clearing for 0..4, 8, 12, 16, 20, 24, strncpy_from_user and strnlen_user
31  *   always inline.
32  * - Constraints for destination addr in get_user cannot be memory, only reg.
33  * - Correct labels for PC at expected fault points.
34  * - Nits with assembly code.
35  * - Don't use statement expressions without value; use "do {} while (0)".
36  * - Return correct values from __generic_... functions.
37  *
38  * Revision 1.4  2000/09/12 16:28:25  bjornw
39  * * Removed comments from the get/put user asm code
40  * * Constrains for destination addr in put_user cannot be memory, only reg
41  *
42  * Revision 1.3  2000/09/12 14:30:20  bjornw
43  * MAX_ADDR_USER does not exist anymore
44  *
45  * Revision 1.2  2000/07/13 15:52:48  bjornw
46  * New user-access functions
47  *
48  * Revision 1.1.1.1  2000/07/10 16:32:31  bjornw
49  * CRIS architecture, working draft
50  *
51  *
52  *
53  */
54
55 /* Asm:s have been tweaked (within the domain of correctness) to give
56    satisfactory results for "gcc version 2.96 20000427 (experimental)".
57
58    Check regularly...
59
60    Register $r9 is chosen for temporaries, being a call-clobbered register
61    first in line to be used (notably for local blocks), not colliding with
62    parameter registers.  */
63
64 #ifndef _CRIS_UACCESS_H
65 #define _CRIS_UACCESS_H
66
67 #ifndef __ASSEMBLY__
68 #include <linux/sched.h>
69 #include <linux/errno.h>
70 #include <asm/processor.h>
71 #include <asm/page.h>
72
73 #define VERIFY_READ     0
74 #define VERIFY_WRITE    1
75
76 /*
77  * The fs value determines whether argument validity checking should be
78  * performed or not.  If get_fs() == USER_DS, checking is performed, with
79  * get_fs() == KERNEL_DS, checking is bypassed.
80  *
81  * For historical reasons, these macros are grossly misnamed.
82  */
83
84 #define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })
85
86 /* addr_limit is the maximum accessible address for the task. we misuse
87  * the KERNEL_DS and USER_DS values to both assign and compare the 
88  * addr_limit values through the equally misnamed get/set_fs macros.
89  * (see above)
90  */
91
92 #define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFF)
93 #define USER_DS         MAKE_MM_SEG(TASK_SIZE)
94
95 #define get_ds()        (KERNEL_DS)
96 #define get_fs()        (current->addr_limit)
97 #define set_fs(x)       (current->addr_limit = (x))
98
99 #define segment_eq(a,b) ((a).seg == (b).seg)
100
101 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
102 #define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
103 #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
104 #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
105
106 extern inline int verify_area(int type, const void * addr, unsigned long size)
107 {
108         return access_ok(type,addr,size) ? 0 : -EFAULT;
109 }
110
111
112 /*
113  * The exception table consists of pairs of addresses: the first is the
114  * address of an instruction that is allowed to fault, and the second is
115  * the address at which the program should continue.  No registers are
116  * modified, so it is entirely up to the continuation code to figure out
117  * what to do.
118  *
119  * All the routines below use bits of fixup code that are out of line
120  * with the main instruction path.  This means when everything is well,
121  * we don't even have to jump over them.  Further, they do not intrude
122  * on our cache or tlb entries.
123  */
124
125 struct exception_table_entry
126 {
127         unsigned long insn, fixup;
128 };
129
130 /* Returns 0 if exception not found and fixup otherwise.  */
131 extern unsigned long search_exception_table(unsigned long);
132
133
134 /*
135  * These are the main single-value transfer routines.  They automatically
136  * use the right size if we just have the right pointer type.
137  *
138  * This gets kind of ugly. We want to return _two_ values in "get_user()"
139  * and yet we don't want to do any pointers, because that is too much
140  * of a performance impact. Thus we have a few rather ugly macros here,
141  * and hide all the ugliness from the user.
142  *
143  * The "__xxx" versions of the user access functions are versions that
144  * do not verify the address space, that must have been done previously
145  * with a separate "access_ok()" call (this is used when we do multiple
146  * accesses to the same area of user memory).
147  *
148  * As we use the same address space for kernel and user data on
149  * CRIS, we can just do these as direct assignments.  (Of course, the
150  * exception handling means that it's no longer "just"...)
151  */
152 #define get_user(x,ptr) \
153   __get_user_check((x),(ptr),sizeof(*(ptr)))
154 #define put_user(x,ptr) \
155   __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
156
157 #define __get_user(x,ptr) \
158   __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
159 #define __put_user(x,ptr) \
160   __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
161
162 extern long __put_user_bad(void);
163
164 #define __put_user_nocheck(x,ptr,size)                  \
165 ({                                                      \
166         long __pu_err;                                  \
167         __put_user_size((x),(ptr),(size),__pu_err);     \
168         __pu_err;                                       \
169 })
170
171 #define __put_user_check(x,ptr,size)                            \
172 ({                                                              \
173         long __pu_err = -EFAULT;                                \
174         __typeof__(*(ptr)) *__pu_addr = (ptr);                  \
175         if (access_ok(VERIFY_WRITE,__pu_addr,size))             \
176                 __put_user_size((x),__pu_addr,(size),__pu_err); \
177         __pu_err;                                               \
178 })
179
180 #define __put_user_size(x,ptr,size,retval)                      \
181 do {                                                            \
182         retval = 0;                                             \
183         switch (size) {                                         \
184           case 1: __put_user_asm(x,ptr,retval,"move.b"); break; \
185           case 2: __put_user_asm(x,ptr,retval,"move.w"); break; \
186           case 4: __put_user_asm(x,ptr,retval,"move.d"); break; \
187           case 8: __put_user_asm_64(x,ptr,retval); break;       \
188           default: __put_user_bad();                            \
189         }                                                       \
190 } while (0)
191
192 struct __large_struct { unsigned long buf[100]; };
193 #define __m(x) (*(struct __large_struct *)(x))
194
195 /*
196  * We don't tell gcc that we are accessing memory, but this is OK
197  * because we do not write to any memory gcc knows about, so there
198  * are no aliasing issues.
199  *
200  * Note that PC at a fault is the address *after* the faulting
201  * instruction.
202  */
203 #define __put_user_asm(x, addr, err, op)                        \
204         __asm__ __volatile__(                                   \
205                 "       "op" %1,[%2]\n"                         \
206                 "2:\n"                                          \
207                 "       .section .fixup,\"ax\"\n"               \
208                 "3:     move.d %3,%0\n"                         \
209                 "       jump 2b\n"                              \
210                 "       .previous\n"                            \
211                 "       .section __ex_table,\"a\"\n"            \
212                 "       .dword 2b,3b\n"                         \
213                 "       .previous\n"                            \
214                 : "=r" (err)                                    \
215                 : "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
216
217 #define __put_user_asm_64(x, addr, err)                         \
218         __asm__ __volatile__(                                   \
219                 "       move.d %M1,[%2]\n"                      \
220                 "2:     move.d %H1,[%2+4]\n"                    \
221                 "4:\n"                                          \
222                 "       .section .fixup,\"ax\"\n"               \
223                 "3:     move.d %3,%0\n"                         \
224                 "       jump 4b\n"                              \
225                 "       .previous\n"                            \
226                 "       .section __ex_table,\"a\"\n"            \
227                 "       .dword 2b,3b\n"                         \
228                 "       .dword 4b,3b\n"                         \
229                 "       .previous\n"                            \
230                 : "=r" (err)                                    \
231                 : "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
232
233
234 #define __get_user_nocheck(x,ptr,size)                          \
235 ({                                                              \
236         long __gu_err, __gu_val;                                \
237         __get_user_size(__gu_val,(ptr),(size),__gu_err);        \
238         (x) = (__typeof__(*(ptr)))__gu_val;                     \
239         __gu_err;                                               \
240 })
241
242 #define __get_user_check(x,ptr,size)                                    \
243 ({                                                                      \
244         long __gu_err = -EFAULT, __gu_val = 0;                          \
245         const __typeof__(*(ptr)) *__gu_addr = (ptr);                    \
246         if (access_ok(VERIFY_READ,__gu_addr,size))                      \
247                 __get_user_size(__gu_val,__gu_addr,(size),__gu_err);    \
248         (x) = (__typeof__(*(ptr)))__gu_val;                             \
249         __gu_err;                                                       \
250 })
251
252 extern long __get_user_bad(void);
253
254 #define __get_user_size(x,ptr,size,retval)                      \
255 do {                                                            \
256         retval = 0;                                             \
257         switch (size) {                                         \
258           case 1: __get_user_asm(x,ptr,retval,"move.b"); break; \
259           case 2: __get_user_asm(x,ptr,retval,"move.w"); break; \
260           case 4: __get_user_asm(x,ptr,retval,"move.d"); break; \
261           case 8: __get_user_asm_64(x,ptr,retval); break;       \
262           default: (x) = __get_user_bad();                      \
263         }                                                       \
264 } while (0)
265
266 /* See comment before __put_user_asm.  */
267
268 #define __get_user_asm(x, addr, err, op)                \
269         __asm__ __volatile__(                           \
270                 "       "op" [%2],%1\n"                 \
271                 "2:\n"                                  \
272                 "       .section .fixup,\"ax\"\n"       \
273                 "3:     move.d %3,%0\n"                 \
274                 "       moveq 0,%1\n"                   \
275                 "       jump 2b\n"                      \
276                 "       .previous\n"                    \
277                 "       .section __ex_table,\"a\"\n"    \
278                 "       .dword 2b,3b\n"                 \
279                 "       .previous\n"                    \
280                 : "=r" (err), "=r" (x)                  \
281                 : "r" (addr), "g" (-EFAULT), "0" (err))
282
283 #define __get_user_asm_64(x, addr, err)                 \
284         __asm__ __volatile__(                           \
285                 "       move.d [%2],%M1\n"              \
286                 "2:     move.d [%2+4],%H1\n"            \
287                 "4:\n"                                  \
288                 "       .section .fixup,\"ax\"\n"       \
289                 "3:     move.d %3,%0\n"                 \
290                 "       moveq 0,%1\n"                   \
291                 "       jump 4b\n"                      \
292                 "       .previous\n"                    \
293                 "       .section __ex_table,\"a\"\n"    \
294                 "       .dword 2b,3b\n"                 \
295                 "       .dword 4b,3b\n"                 \
296                 "       .previous\n"                    \
297                 : "=r" (err), "=r" (x)                  \
298                 : "r" (addr), "g" (-EFAULT), "0" (err))
299
300 /* More complex functions.  Most are inline, but some call functions that
301    live in lib/usercopy.c  */
302
303 extern unsigned long __copy_user(void *to, const void *from, unsigned long n);
304 extern unsigned long __copy_user_zeroing(void *to, const void *from, unsigned long n);
305 extern unsigned long __do_clear_user(void *to, unsigned long n);
306
307 /*
308  * Copy a null terminated string from userspace.
309  *
310  * Must return:
311  * -EFAULT              for an exception
312  * count                if we hit the buffer limit
313  * bytes copied         if we hit a null byte
314  * (without the null byte)
315  */
316
317 extern inline long         
318 __do_strncpy_from_user(char *dst, const char *src, long count)
319 {
320         long res;
321
322         if (count == 0)
323                 return 0;
324
325         /*
326          * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
327          *  So do we.
328          *
329          *  This code is deduced from:
330          *
331          *      char tmp2;
332          *      long tmp1, tmp3 
333          *      tmp1 = count;
334          *      while ((*dst++ = (tmp2 = *src++)) != 0
335          *             && --tmp1)
336          *        ;
337          *
338          *      res = count - tmp1;
339          *
340          *  with tweaks.
341          */
342
343         __asm__ __volatile__ (
344                 "       move.d %3,%0\n"
345                 "       move.b [%2+],$r9\n"
346                 "1:     beq 2f\n"
347                 "       move.b $r9,[%1+]\n"
348
349                 "       subq 1,%0\n"
350                 "       bne 1b\n"
351                 "       move.b [%2+],$r9\n"
352
353                 "2:     sub.d %3,%0\n"
354                 "       neg.d %0,%0\n"
355                 "3:\n"
356                 "       .section .fixup,\"ax\"\n"
357                 "4:     move.d %7,%0\n"
358                 "       jump 3b\n"
359
360                 /* There's one address for a fault at the first move, and
361                    two possible PC values for a fault at the second move,
362                    being a delay-slot filler.  However, the branch-target
363                    for the second move is the same as the first address.
364                    Just so you don't get confused...  */
365                 "       .previous\n"
366                 "       .section __ex_table,\"a\"\n"
367                 "       .dword 1b,4b\n"
368                 "       .dword 2b,4b\n"
369                 "       .previous"
370                 : "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
371                 : "3" (count), "1" (dst), "2" (src), "g" (-EFAULT)
372                 : "r9");
373
374         return res;
375 }
376
377 extern inline unsigned long
378 __generic_copy_to_user(void *to, const void *from, unsigned long n)
379 {
380         if (access_ok(VERIFY_WRITE, to, n))
381                 return __copy_user(to,from,n);
382         return n;
383 }
384
385 extern inline unsigned long
386 __generic_copy_from_user(void *to, const void *from, unsigned long n)
387 {
388         if (access_ok(VERIFY_READ, from, n))
389                 return __copy_user_zeroing(to,from,n);
390         return n;
391 }
392
393 extern inline unsigned long
394 __generic_clear_user(void *to, unsigned long n)
395 {
396         if (access_ok(VERIFY_WRITE, to, n))
397                 return __do_clear_user(to,n);
398         return n;
399 }
400
401 extern inline long
402 __strncpy_from_user(char *dst, const char *src, long count)
403 {
404         return __do_strncpy_from_user(dst, src, count);
405 }
406
407 extern inline long
408 strncpy_from_user(char *dst, const char *src, long count)
409 {
410         long res = -EFAULT;
411         if (access_ok(VERIFY_READ, src, 1))
412                 res = __do_strncpy_from_user(dst, src, count);
413         return res;
414 }
415
416 /* A few copy asms to build up the more complex ones from.
417
418    Note again, a post-increment is performed regardless of whether a bus
419    fault occurred in that instruction, and PC for a faulted insn is the
420    address *after* the insn.  */
421
422 #define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
423         __asm__ __volatile__ (                          \
424                         COPY                            \
425                 "1:\n"                                  \
426                 "       .section .fixup,\"ax\"\n"       \
427                         FIXUP                           \
428                 "       jump 1b\n"                      \
429                 "       .previous\n"                    \
430                 "       .section __ex_table,\"a\"\n"    \
431                         TENTRY                          \
432                 "       .previous\n"                    \
433                 : "=r" (to), "=r" (from), "=r" (ret)    \
434                 : "0" (to), "1" (from), "2" (ret)       \
435                 : "r9", "memory")
436
437 #define __asm_copy_from_user_1(to, from, ret) \
438         __asm_copy_user_cont(to, from, ret,     \
439                 "       move.b [%1+],$r9\n"     \
440                 "2:     move.b $r9,[%0+]\n",    \
441                 "3:     addq 1,%2\n"            \
442                 "       clear.b [%0+]\n",       \
443                 "       .dword 2b,3b\n")
444
445 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
446         __asm_copy_user_cont(to, from, ret,             \
447                 "       move.w [%1+],$r9\n"             \
448                 "2:     move.w $r9,[%0+]\n" COPY,       \
449                 "3:     addq 2,%2\n"                    \
450                 "       clear.w [%0+]\n" FIXUP,         \
451                 "       .dword 2b,3b\n" TENTRY)
452
453 #define __asm_copy_from_user_2(to, from, ret) \
454         __asm_copy_from_user_2x_cont(to, from, ret, "", "", "")
455
456 #define __asm_copy_from_user_3(to, from, ret)           \
457         __asm_copy_from_user_2x_cont(to, from, ret,     \
458                 "       move.b [%1+],$r9\n"             \
459                 "4:     move.b $r9,[%0+]\n",            \
460                 "5:     addq 1,%2\n"                    \
461                 "       clear.b [%0+]\n",               \
462                 "       .dword 4b,5b\n")
463
464 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
465         __asm_copy_user_cont(to, from, ret,             \
466                 "       move.d [%1+],$r9\n"             \
467                 "2:     move.d $r9,[%0+]\n" COPY,       \
468                 "3:     addq 4,%2\n"                    \
469                 "       clear.d [%0+]\n" FIXUP,         \
470                 "       .dword 2b,3b\n" TENTRY)
471
472 #define __asm_copy_from_user_4(to, from, ret) \
473         __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
474
475 #define __asm_copy_from_user_5(to, from, ret) \
476         __asm_copy_from_user_4x_cont(to, from, ret,     \
477                 "       move.b [%1+],$r9\n"             \
478                 "4:     move.b $r9,[%0+]\n",            \
479                 "5:     addq 1,%2\n"                    \
480                 "       clear.b [%0+]\n",               \
481                 "       .dword 4b,5b\n")
482
483 #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
484         __asm_copy_from_user_4x_cont(to, from, ret,     \
485                 "       move.w [%1+],$r9\n"             \
486                 "4:     move.w $r9,[%0+]\n" COPY,       \
487                 "5:     addq 2,%2\n"                    \
488                 "       clear.w [%0+]\n" FIXUP,         \
489                 "       .dword 4b,5b\n" TENTRY)
490
491 #define __asm_copy_from_user_6(to, from, ret) \
492         __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
493
494 #define __asm_copy_from_user_7(to, from, ret) \
495         __asm_copy_from_user_6x_cont(to, from, ret,     \
496                 "       move.b [%1+],$r9\n"             \
497                 "6:     move.b $r9,[%0+]\n",            \
498                 "7:     addq 1,%2\n"                    \
499                 "       clear.b [%0+]\n",               \
500                 "       .dword 6b,7b\n")
501
502 #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
503         __asm_copy_from_user_4x_cont(to, from, ret,     \
504                 "       move.d [%1+],$r9\n"             \
505                 "4:     move.d $r9,[%0+]\n" COPY,       \
506                 "5:     addq 4,%2\n"                    \
507                 "       clear.d [%0+]\n" FIXUP,         \
508                 "       .dword 4b,5b\n" TENTRY)
509
510 #define __asm_copy_from_user_8(to, from, ret) \
511         __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
512
513 #define __asm_copy_from_user_9(to, from, ret) \
514         __asm_copy_from_user_8x_cont(to, from, ret,     \
515                 "       move.b [%1+],$r9\n"             \
516                 "6:     move.b $r9,[%0+]\n",            \
517                 "7:     addq 1,%2\n"                    \
518                 "       clear.b [%0+]\n",               \
519                 "       .dword 6b,7b\n")
520
521 #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
522         __asm_copy_from_user_8x_cont(to, from, ret,     \
523                 "       move.w [%1+],$r9\n"             \
524                 "6:     move.w $r9,[%0+]\n" COPY,       \
525                 "7:     addq 2,%2\n"                    \
526                 "       clear.w [%0+]\n" FIXUP,         \
527                 "       .dword 6b,7b\n" TENTRY)
528
529 #define __asm_copy_from_user_10(to, from, ret) \
530         __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
531
532 #define __asm_copy_from_user_11(to, from, ret)          \
533         __asm_copy_from_user_10x_cont(to, from, ret,    \
534                 "       move.b [%1+],$r9\n"             \
535                 "8:     move.b $r9,[%0+]\n",            \
536                 "9:     addq 1,%2\n"                    \
537                 "       clear.b [%0+]\n",               \
538                 "       .dword 8b,9b\n")
539
540 #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
541         __asm_copy_from_user_8x_cont(to, from, ret,     \
542                 "       move.d [%1+],$r9\n"             \
543                 "6:     move.d $r9,[%0+]\n" COPY,       \
544                 "7:     addq 4,%2\n"                    \
545                 "       clear.d [%0+]\n" FIXUP,         \
546                 "       .dword 6b,7b\n" TENTRY)
547
548 #define __asm_copy_from_user_12(to, from, ret) \
549         __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
550
551 #define __asm_copy_from_user_13(to, from, ret) \
552         __asm_copy_from_user_12x_cont(to, from, ret,    \
553                 "       move.b [%1+],$r9\n"             \
554                 "8:     move.b $r9,[%0+]\n",            \
555                 "9:     addq 1,%2\n"                    \
556                 "       clear.b [%0+]\n",               \
557                 "       .dword 8b,9b\n")
558
559 #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
560         __asm_copy_from_user_12x_cont(to, from, ret,    \
561                 "       move.w [%1+],$r9\n"             \
562                 "8:     move.w $r9,[%0+]\n" COPY,       \
563                 "9:     addq 2,%2\n"                    \
564                 "       clear.w [%0+]\n" FIXUP,         \
565                 "       .dword 8b,9b\n" TENTRY)
566
567 #define __asm_copy_from_user_14(to, from, ret) \
568         __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
569
570 #define __asm_copy_from_user_15(to, from, ret) \
571         __asm_copy_from_user_14x_cont(to, from, ret,    \
572                 "       move.b [%1+],$r9\n"             \
573                 "10:    move.b $r9,[%0+]\n",            \
574                 "11:    addq 1,%2\n"                    \
575                 "       clear.b [%0+]\n",               \
576                 "       .dword 10b,11b\n")
577
578 #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
579         __asm_copy_from_user_12x_cont(to, from, ret,    \
580                 "       move.d [%1+],$r9\n"             \
581                 "8:     move.d $r9,[%0+]\n" COPY,       \
582                 "9:     addq 4,%2\n"                    \
583                 "       clear.d [%0+]\n" FIXUP,         \
584                 "       .dword 8b,9b\n" TENTRY)
585
586 #define __asm_copy_from_user_16(to, from, ret) \
587         __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
588
589 #define __asm_copy_from_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
590         __asm_copy_from_user_16x_cont(to, from, ret,    \
591                 "       move.d [%1+],$r9\n"             \
592                 "10:    move.d $r9,[%0+]\n" COPY,       \
593                 "11:    addq 4,%2\n"                    \
594                 "       clear.d [%0+]\n" FIXUP,         \
595                 "       .dword 10b,11b\n" TENTRY)
596
597 #define __asm_copy_from_user_20(to, from, ret) \
598         __asm_copy_from_user_20x_cont(to, from, ret, "", "", "")
599
600 #define __asm_copy_from_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
601         __asm_copy_from_user_20x_cont(to, from, ret,    \
602                 "       move.d [%1+],$r9\n"             \
603                 "12:    move.d $r9,[%0+]\n" COPY,       \
604                 "13:    addq 4,%2\n"                    \
605                 "       clear.d [%0+]\n" FIXUP,         \
606                 "       .dword 12b,13b\n" TENTRY)
607
608 #define __asm_copy_from_user_24(to, from, ret) \
609         __asm_copy_from_user_24x_cont(to, from, ret, "", "", "")
610
611 /* And now, the to-user ones.  */
612
613 #define __asm_copy_to_user_1(to, from, ret)     \
614         __asm_copy_user_cont(to, from, ret,     \
615                 "       move.b [%1+],$r9\n"     \
616                 "       move.b $r9,[%0+]\n2:\n",        \
617                 "3:     addq 1,%2\n",           \
618                 "       .dword 2b,3b\n")
619
620 #define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
621         __asm_copy_user_cont(to, from, ret,             \
622                 "       move.w [%1+],$r9\n"             \
623                 "       move.w $r9,[%0+]\n2:\n" COPY,   \
624                 "3:     addq 2,%2\n" FIXUP,             \
625                 "       .dword 2b,3b\n" TENTRY)
626
627 #define __asm_copy_to_user_2(to, from, ret) \
628         __asm_copy_to_user_2x_cont(to, from, ret, "", "", "")
629
630 #define __asm_copy_to_user_3(to, from, ret) \
631         __asm_copy_to_user_2x_cont(to, from, ret,       \
632                 "       move.b [%1+],$r9\n"             \
633                 "       move.b $r9,[%0+]\n4:\n",                \
634                 "5:     addq 1,%2\n",                   \
635                 "       .dword 4b,5b\n")
636
637 #define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
638         __asm_copy_user_cont(to, from, ret,             \
639                 "       move.d [%1+],$r9\n"             \
640                 "       move.d $r9,[%0+]\n2:\n" COPY,   \
641                 "3:     addq 4,%2\n" FIXUP,             \
642                 "       .dword 2b,3b\n" TENTRY)
643
644 #define __asm_copy_to_user_4(to, from, ret) \
645         __asm_copy_to_user_4x_cont(to, from, ret, "", "", "")
646
647 #define __asm_copy_to_user_5(to, from, ret) \
648         __asm_copy_to_user_4x_cont(to, from, ret,       \
649                 "       move.b [%1+],$r9\n"             \
650                 "       move.b $r9,[%0+]\n4:\n",                \
651                 "5:     addq 1,%2\n",                   \
652                 "       .dword 4b,5b\n")
653
654 #define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
655         __asm_copy_to_user_4x_cont(to, from, ret,       \
656                 "       move.w [%1+],$r9\n"             \
657                 "       move.w $r9,[%0+]\n4:\n" COPY,   \
658                 "5:     addq 2,%2\n" FIXUP,             \
659                 "       .dword 4b,5b\n" TENTRY)
660
661 #define __asm_copy_to_user_6(to, from, ret) \
662         __asm_copy_to_user_6x_cont(to, from, ret, "", "", "")
663
664 #define __asm_copy_to_user_7(to, from, ret) \
665         __asm_copy_to_user_6x_cont(to, from, ret,       \
666                 "       move.b [%1+],$r9\n"             \
667                 "       move.b $r9,[%0+]\n6:\n",                \
668                 "7:     addq 1,%2\n",                   \
669                 "       .dword 6b,7b\n")
670
671 #define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
672         __asm_copy_to_user_4x_cont(to, from, ret,       \
673                 "       move.d [%1+],$r9\n"             \
674                 "       move.d $r9,[%0+]\n4:\n" COPY,   \
675                 "5:     addq 4,%2\n"  FIXUP,            \
676                 "       .dword 4b,5b\n" TENTRY)
677
678 #define __asm_copy_to_user_8(to, from, ret) \
679         __asm_copy_to_user_8x_cont(to, from, ret, "", "", "")
680
681 #define __asm_copy_to_user_9(to, from, ret) \
682         __asm_copy_to_user_8x_cont(to, from, ret,       \
683                 "       move.b [%1+],$r9\n"             \
684                 "       move.b $r9,[%0+]\n6:\n",                \
685                 "7:     addq 1,%2\n",                   \
686                 "       .dword 6b,7b\n")
687
688 #define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
689         __asm_copy_to_user_8x_cont(to, from, ret,       \
690                 "       move.w [%1+],$r9\n"             \
691                 "       move.w $r9,[%0+]\n6:\n" COPY,   \
692                 "7:     addq 2,%2\n" FIXUP,             \
693                 "       .dword 6b,7b\n" TENTRY)
694
695 #define __asm_copy_to_user_10(to, from, ret) \
696         __asm_copy_to_user_10x_cont(to, from, ret, "", "", "")
697
698 #define __asm_copy_to_user_11(to, from, ret) \
699         __asm_copy_to_user_10x_cont(to, from, ret,      \
700                 "       move.b [%1+],$r9\n"             \
701                 "       move.b $r9,[%0+]\n8:\n",                \
702                 "9:     addq 1,%2\n",                   \
703                 "       .dword 8b,9b\n")
704
705 #define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
706         __asm_copy_to_user_8x_cont(to, from, ret,       \
707                 "       move.d [%1+],$r9\n"             \
708                 "       move.d $r9,[%0+]\n6:\n" COPY,   \
709                 "7:     addq 4,%2\n" FIXUP,             \
710                 "       .dword 6b,7b\n" TENTRY)
711
712 #define __asm_copy_to_user_12(to, from, ret) \
713         __asm_copy_to_user_12x_cont(to, from, ret, "", "", "")
714
715 #define __asm_copy_to_user_13(to, from, ret) \
716         __asm_copy_to_user_12x_cont(to, from, ret,      \
717                 "       move.b [%1+],$r9\n"             \
718                 "       move.b $r9,[%0+]\n8:\n",                \
719                 "9:     addq 1,%2\n",                   \
720                 "       .dword 8b,9b\n")
721
722 #define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
723         __asm_copy_to_user_12x_cont(to, from, ret,      \
724                 "       move.w [%1+],$r9\n"             \
725                 "       move.w $r9,[%0+]\n8:\n" COPY,   \
726                 "9:     addq 2,%2\n" FIXUP,             \
727                 "       .dword 8b,9b\n" TENTRY)
728
729 #define __asm_copy_to_user_14(to, from, ret)    \
730         __asm_copy_to_user_14x_cont(to, from, ret, "", "", "")
731
732 #define __asm_copy_to_user_15(to, from, ret) \
733         __asm_copy_to_user_14x_cont(to, from, ret,      \
734                 "       move.b [%1+],$r9\n"             \
735                 "       move.b $r9,[%0+]\n10:\n",               \
736                 "11:    addq 1,%2\n",                   \
737                 "       .dword 10b,11b\n")
738
739 #define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
740         __asm_copy_to_user_12x_cont(to, from, ret,      \
741                 "       move.d [%1+],$r9\n"             \
742                 "       move.d $r9,[%0+]\n8:\n" COPY,   \
743                 "9:     addq 4,%2\n" FIXUP,             \
744                 "       .dword 8b,9b\n" TENTRY)
745
746 #define __asm_copy_to_user_16(to, from, ret) \
747         __asm_copy_to_user_16x_cont(to, from, ret, "", "", "")
748
749 #define __asm_copy_to_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
750         __asm_copy_to_user_16x_cont(to, from, ret,      \
751                 "       move.d [%1+],$r9\n"             \
752                 "       move.d $r9,[%0+]\n10:\n" COPY,  \
753                 "11:    addq 4,%2\n" FIXUP,             \
754                 "       .dword 10b,11b\n" TENTRY)
755
756 #define __asm_copy_to_user_20(to, from, ret) \
757         __asm_copy_to_user_20x_cont(to, from, ret, "", "", "")
758
759 #define __asm_copy_to_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
760         __asm_copy_to_user_20x_cont(to, from, ret,      \
761                 "       move.d [%1+],$r9\n"             \
762                 "       move.d $r9,[%0+]\n12:\n" COPY,  \
763                 "13:    addq 4,%2\n" FIXUP,             \
764                 "       .dword 12b,13b\n" TENTRY)
765
766 #define __asm_copy_to_user_24(to, from, ret)    \
767         __asm_copy_to_user_24x_cont(to, from, ret, "", "", "")
768
769 /* Define a few clearing asms with exception handlers.  */
770
771 /* This frame-asm is like the __asm_copy_user_cont one, but has one less
772    input.  */
773
774 #define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
775         __asm__ __volatile__ (                          \
776                         CLEAR                           \
777                 "1:\n"                                  \
778                 "       .section .fixup,\"ax\"\n"       \
779                         FIXUP                           \
780                 "       jump 1b\n"                      \
781                 "       .previous\n"                    \
782                 "       .section __ex_table,\"a\"\n"    \
783                         TENTRY                          \
784                 "       .previous"                      \
785                 : "=r" (to), "=r" (ret)                 \
786                 : "0" (to), "1" (ret)                   \
787                 : "memory")
788
789 #define __asm_clear_1(to, ret) \
790         __asm_clear(to, ret,                    \
791                 "       clear.b [%0+]\n2:\n",   \
792                 "3:     addq 1,%1\n",           \
793                 "       .dword 2b,3b\n")
794
795 #define __asm_clear_2(to, ret) \
796         __asm_clear(to, ret,                    \
797                 "       clear.w [%0+]\n2:\n",   \
798                 "3:     addq 2,%1\n",           \
799                 "       .dword 2b,3b\n")
800
801 #define __asm_clear_3(to, ret) \
802      __asm_clear(to, ret,                       \
803                  "      clear.w [%0+]\n"        \
804                  "2:    clear.b [%0+]\n3:\n",   \
805                  "4:    addq 2,%1\n"            \
806                  "5:    addq 1,%1\n",           \
807                  "      .dword 2b,4b\n"         \
808                  "      .dword 3b,5b\n")
809
810 #define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
811         __asm_clear(to, ret,                            \
812                 "       clear.d [%0+]\n2:\n" CLEAR,     \
813                 "3:     addq 4,%1\n" FIXUP,             \
814                 "       .dword 2b,3b\n" TENTRY)
815
816 #define __asm_clear_4(to, ret) \
817         __asm_clear_4x_cont(to, ret, "", "", "")
818
819 #define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
820         __asm_clear_4x_cont(to, ret,                    \
821                 "       clear.d [%0+]\n4:\n" CLEAR,     \
822                 "5:     addq 4,%1\n" FIXUP,             \
823                 "       .dword 4b,5b\n" TENTRY)
824
825 #define __asm_clear_8(to, ret) \
826         __asm_clear_8x_cont(to, ret, "", "", "")
827
828 #define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
829         __asm_clear_8x_cont(to, ret,                    \
830                 "       clear.d [%0+]\n6:\n" CLEAR,     \
831                 "7:     addq 4,%1\n" FIXUP,             \
832                 "       .dword 6b,7b\n" TENTRY)
833
834 #define __asm_clear_12(to, ret) \
835         __asm_clear_12x_cont(to, ret, "", "", "")
836
837 #define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
838         __asm_clear_12x_cont(to, ret,                   \
839                 "       clear.d [%0+]\n8:\n" CLEAR,     \
840                 "9:     addq 4,%1\n" FIXUP,             \
841                 "       .dword 8b,9b\n" TENTRY)
842
843 #define __asm_clear_16(to, ret) \
844         __asm_clear_16x_cont(to, ret, "", "", "")
845
846 #define __asm_clear_20x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
847         __asm_clear_16x_cont(to, ret,                   \
848                 "       clear.d [%0+]\n10:\n" CLEAR,    \
849                 "11:    addq 4,%1\n" FIXUP,             \
850                 "       .dword 10b,11b\n" TENTRY)
851
852 #define __asm_clear_20(to, ret) \
853         __asm_clear_20x_cont(to, ret, "", "", "")
854
855 #define __asm_clear_24x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
856         __asm_clear_20x_cont(to, ret,                   \
857                 "       clear.d [%0+]\n12:\n" CLEAR,    \
858                 "13:    addq 4,%1\n" FIXUP,             \
859                 "       .dword 12b,13b\n" TENTRY)
860
861 #define __asm_clear_24(to, ret) \
862         __asm_clear_24x_cont(to, ret, "", "", "")
863
864 /* Note that if these expand awfully if made into switch constructs, so
865    don't do that.  */
866
867 extern inline unsigned long
868 __constant_copy_from_user(void *to, const void *from, unsigned long n)
869 {
870         unsigned long ret = 0;
871         if (n == 0)
872                 ;
873         else if (n == 1)
874                 __asm_copy_from_user_1(to, from, ret);
875         else if (n == 2)
876                 __asm_copy_from_user_2(to, from, ret);
877         else if (n == 3)
878                 __asm_copy_from_user_3(to, from, ret);
879         else if (n == 4)
880                 __asm_copy_from_user_4(to, from, ret);
881         else if (n == 5)
882                 __asm_copy_from_user_5(to, from, ret);
883         else if (n == 6)
884                 __asm_copy_from_user_6(to, from, ret);
885         else if (n == 7)
886                 __asm_copy_from_user_7(to, from, ret);
887         else if (n == 8)
888                 __asm_copy_from_user_8(to, from, ret);
889         else if (n == 9)
890                 __asm_copy_from_user_9(to, from, ret);
891         else if (n == 10)
892                 __asm_copy_from_user_10(to, from, ret);
893         else if (n == 11)
894                 __asm_copy_from_user_11(to, from, ret);
895         else if (n == 12)
896                 __asm_copy_from_user_12(to, from, ret);
897         else if (n == 13)
898                 __asm_copy_from_user_13(to, from, ret);
899         else if (n == 14)
900                 __asm_copy_from_user_14(to, from, ret);
901         else if (n == 15)
902                 __asm_copy_from_user_15(to, from, ret);
903         else if (n == 16)
904                 __asm_copy_from_user_16(to, from, ret);
905         else if (n == 20)
906                 __asm_copy_from_user_20(to, from, ret);
907         else if (n == 24)
908                 __asm_copy_from_user_24(to, from, ret);
909         else
910                 ret = __generic_copy_from_user(to, from, n);
911
912         return ret;
913 }
914
915 /* Ditto, don't make a switch out of this.  */
916
917 extern inline unsigned long
918 __constant_copy_to_user(void *to, const void *from, unsigned long n)
919 {
920         unsigned long ret = 0;
921         if (n == 0)
922                 ;
923         else if (n == 1)
924                 __asm_copy_to_user_1(to, from, ret);
925         else if (n == 2)
926                 __asm_copy_to_user_2(to, from, ret);
927         else if (n == 3)
928                 __asm_copy_to_user_3(to, from, ret);
929         else if (n == 4)
930                 __asm_copy_to_user_4(to, from, ret);
931         else if (n == 5)
932                 __asm_copy_to_user_5(to, from, ret);
933         else if (n == 6)
934                 __asm_copy_to_user_6(to, from, ret);
935         else if (n == 7)
936                 __asm_copy_to_user_7(to, from, ret);
937         else if (n == 8)
938                 __asm_copy_to_user_8(to, from, ret);
939         else if (n == 9)
940                 __asm_copy_to_user_9(to, from, ret);
941         else if (n == 10)
942                 __asm_copy_to_user_10(to, from, ret);
943         else if (n == 11)
944                 __asm_copy_to_user_11(to, from, ret);
945         else if (n == 12)
946                 __asm_copy_to_user_12(to, from, ret);
947         else if (n == 13)
948                 __asm_copy_to_user_13(to, from, ret);
949         else if (n == 14)
950                 __asm_copy_to_user_14(to, from, ret);
951         else if (n == 15)
952                 __asm_copy_to_user_15(to, from, ret);
953         else if (n == 16)
954                 __asm_copy_to_user_16(to, from, ret);
955         else if (n == 20)
956                 __asm_copy_to_user_20(to, from, ret);
957         else if (n == 24)
958                 __asm_copy_to_user_24(to, from, ret);
959         else
960                 ret = __generic_copy_to_user(to, from, n);
961
962         return ret;
963 }
964
965 /* No switch, please.  */
966
967 extern inline unsigned long
968 __constant_clear_user(void *to, unsigned long n)
969 {
970         unsigned long ret = 0;
971         if (n == 0)
972                 ;
973         else if (n == 1)
974                 __asm_clear_1(to, ret);
975         else if (n == 2)
976                 __asm_clear_2(to, ret);
977         else if (n == 3)
978                 __asm_clear_3(to, ret);
979         else if (n == 4)
980                 __asm_clear_4(to, ret);
981         else if (n == 8)
982                 __asm_clear_8(to, ret);
983         else if (n == 12)
984                 __asm_clear_12(to, ret);
985         else if (n == 16)
986                 __asm_clear_16(to, ret);
987         else if (n == 20)
988                 __asm_clear_20(to, ret);
989         else if (n == 24)
990                 __asm_clear_24(to, ret);
991         else
992                 ret = __generic_clear_user(to, n);
993
994         return ret;
995 }
996
997
998 #define clear_user(to, n)                       \
999 (__builtin_constant_p(n) ?                      \
1000  __constant_clear_user(to, n) :                 \
1001  __generic_clear_user(to, n))
1002
1003 #define copy_from_user(to, from, n)             \
1004 (__builtin_constant_p(n) ?                      \
1005  __constant_copy_from_user(to, from, n) :       \
1006  __generic_copy_from_user(to, from, n))
1007
1008 #define copy_to_user(to, from, n)               \
1009 (__builtin_constant_p(n) ?                      \
1010  __constant_copy_to_user(to, from, n) :         \
1011  __generic_copy_to_user(to, from, n))
1012
1013 /* We let the __ versions of copy_from/to_user inline, because they're often
1014  * used in fast paths and have only a small space overhead.
1015  */
1016
1017 extern inline unsigned long
1018 __generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
1019 {
1020         return __copy_user_zeroing(to,from,n);
1021 }
1022
1023 extern inline unsigned long
1024 __generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
1025 {
1026         return __copy_user(to,from,n);
1027 }
1028
1029 extern inline unsigned long
1030 __generic_clear_user_nocheck(void *to, unsigned long n)
1031 {
1032         return __do_clear_user(to,n);
1033 }
1034
1035 /* without checking */
1036
1037 #define __copy_to_user(to,from,n)   __generic_copy_to_user_nocheck((to),(from),(n))
1038 #define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
1039 #define __clear_user(to,n) __generic_clear_user_nocheck((to),(n))
1040
1041 /*
1042  * Return the size of a string (including the ending 0)
1043  *
1044  * Return length of string in userspace including terminating 0
1045  * or 0 for error.  Return a value greater than N if too long.
1046  */
1047
1048 extern inline long
1049 strnlen_user(const char *s, long n)
1050 {
1051         long res, tmp1;
1052
1053         if (!access_ok(VERIFY_READ, s, 0))
1054                 return 0;
1055
1056         /*
1057          * This code is deduced from:
1058          *
1059          *      tmp1 = n;
1060          *      while (tmp1-- > 0 && *s++)
1061          *        ;
1062          *
1063          *      res = n - tmp1;
1064          *
1065          *  (with tweaks).
1066          */
1067
1068         __asm__ __volatile__ (
1069                 "       move.d %1,$r9\n"
1070                 "0:\n"
1071                 "       ble 1f\n"
1072                 "       subq 1,$r9\n"
1073
1074                 "       test.b [%0+]\n"
1075                 "       bne 0b\n"
1076                 "       test.d $r9\n"
1077                 "1:\n"
1078                 "       move.d %1,%0\n"
1079                 "       sub.d $r9,%0\n"
1080                 "2:\n"
1081                 "       .section .fixup,\"ax\"\n"
1082
1083                 "3:     clear.d %0\n"
1084                 "       jump 2b\n"
1085
1086                 /* There's one address for a fault at the first move, and
1087                    two possible PC values for a fault at the second move,
1088                    being a delay-slot filler.  However, the branch-target
1089                    for the second move is the same as the first address.
1090                    Just so you don't get confused...  */
1091                 "       .previous\n"
1092                 "       .section __ex_table,\"a\"\n"
1093                 "       .dword 0b,3b\n"
1094                 "       .dword 1b,3b\n"
1095                 "       .previous\n"
1096                 : "=r" (res), "=r" (tmp1)
1097                 : "0" (s), "1" (n)
1098                 : "r9");
1099
1100         return res;
1101 }
1102
1103 #define strlen_user(str)        strnlen_user((str), 0x7ffffffe)
1104
1105 #endif  /* __ASSEMBLY__ */
1106
1107 #endif  /* _CRIS_UACCESS_H */