import of ftp.dlink.com/GPL/DSMG-600_reB/ppclinux.tar.gz
[linux-2.4.21-pre4.git] / include / asm-cris / uaccess.h
1 /* 
2  * Authors:    Bjorn Wesen (bjornw@axis.com)
3  *             Hans-Peter Nilsson (hp@axis.com)
4  *
5  * $Log: uaccess.h,v $
6  * Revision 1.1.1.1  2005/04/11 02:50:52  jack
7  * first release
8  *
9  * Revision 1.1.1.1  2005/01/10 13:16:55  jack
10  * First release
11  *
12  * Revision 1.9  2002/11/20 18:20:17  hp
13  * Make all static inline functions extern inline.
14  *
15  * Revision 1.8  2001/10/29 13:01:48  bjornw
16  * Removed unused variable tmp2 in strnlen_user
17  *
18  * Revision 1.7  2001/10/02 12:44:52  hp
19  * Add support for 64-bit put_user/get_user
20  *
21  * Revision 1.6  2001/10/01 14:51:17  bjornw
22  * Added register prefixes and removed underscores
23  *
24  * Revision 1.5  2000/10/25 03:33:21  hp
25  * - Provide implementation for everything else but get_user and put_user;
26  *   copying inline to/from user for constant length 0..16, 20, 24, and
27  *   clearing for 0..4, 8, 12, 16, 20, 24, strncpy_from_user and strnlen_user
28  *   always inline.
29  * - Constraints for destination addr in get_user cannot be memory, only reg.
30  * - Correct labels for PC at expected fault points.
31  * - Nits with assembly code.
32  * - Don't use statement expressions without value; use "do {} while (0)".
33  * - Return correct values from __generic_... functions.
34  *
35  * Revision 1.4  2000/09/12 16:28:25  bjornw
36  * * Removed comments from the get/put user asm code
 * * Constraints for destination addr in put_user cannot be memory, only reg
38  *
39  * Revision 1.3  2000/09/12 14:30:20  bjornw
40  * MAX_ADDR_USER does not exist anymore
41  *
42  * Revision 1.2  2000/07/13 15:52:48  bjornw
43  * New user-access functions
44  *
45  * Revision 1.1.1.1  2000/07/10 16:32:31  bjornw
46  * CRIS architecture, working draft
47  *
48  *
49  *
50  */
51
52 /* Asm:s have been tweaked (within the domain of correctness) to give
53    satisfactory results for "gcc version 2.96 20000427 (experimental)".
54
55    Check regularly...
56
57    Register $r9 is chosen for temporaries, being a call-clobbered register
58    first in line to be used (notably for local blocks), not colliding with
59    parameter registers.  */
60
61 #ifndef _CRIS_UACCESS_H
62 #define _CRIS_UACCESS_H
63
64 #ifndef __ASSEMBLY__
65 #include <linux/sched.h>
66 #include <linux/errno.h>
67 #include <asm/processor.h>
68 #include <asm/page.h>
69
70 #define VERIFY_READ     0
71 #define VERIFY_WRITE    1
72
73 /*
74  * The fs value determines whether argument validity checking should be
75  * performed or not.  If get_fs() == USER_DS, checking is performed, with
76  * get_fs() == KERNEL_DS, checking is bypassed.
77  *
78  * For historical reasons, these macros are grossly misnamed.
79  */
80
81 #define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })
82
83 /* addr_limit is the maximum accessible address for the task. we misuse
84  * the KERNEL_DS and USER_DS values to both assign and compare the 
85  * addr_limit values through the equally misnamed get/set_fs macros.
86  * (see above)
87  */
88
89 #define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFF)
90 #define USER_DS         MAKE_MM_SEG(TASK_SIZE)
91
92 #define get_ds()        (KERNEL_DS)
93 #define get_fs()        (current->addr_limit)
94 #define set_fs(x)       (current->addr_limit = (x))
95
96 #define segment_eq(a,b) ((a).seg == (b).seg)
97
98 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
99 #define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
100 #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
101 #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
102
103 extern inline int verify_area(int type, const void * addr, unsigned long size)
104 {
105         return access_ok(type,addr,size) ? 0 : -EFAULT;
106 }
107
108
109 /*
110  * The exception table consists of pairs of addresses: the first is the
111  * address of an instruction that is allowed to fault, and the second is
112  * the address at which the program should continue.  No registers are
113  * modified, so it is entirely up to the continuation code to figure out
114  * what to do.
115  *
116  * All the routines below use bits of fixup code that are out of line
117  * with the main instruction path.  This means when everything is well,
118  * we don't even have to jump over them.  Further, they do not intrude
119  * on our cache or tlb entries.
120  */
121
122 struct exception_table_entry
123 {
124         unsigned long insn, fixup;
125 };
126
127 /* Returns 0 if exception not found and fixup otherwise.  */
128 extern unsigned long search_exception_table(unsigned long);
129
130
131 /*
132  * These are the main single-value transfer routines.  They automatically
133  * use the right size if we just have the right pointer type.
134  *
135  * This gets kind of ugly. We want to return _two_ values in "get_user()"
136  * and yet we don't want to do any pointers, because that is too much
137  * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
139  *
140  * The "__xxx" versions of the user access functions are versions that
141  * do not verify the address space, that must have been done previously
142  * with a separate "access_ok()" call (this is used when we do multiple
143  * accesses to the same area of user memory).
144  *
145  * As we use the same address space for kernel and user data on
146  * CRIS, we can just do these as direct assignments.  (Of course, the
147  * exception handling means that it's no longer "just"...)
148  */
149 #define get_user(x,ptr) \
150   __get_user_check((x),(ptr),sizeof(*(ptr)))
151 #define put_user(x,ptr) \
152   __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
153
154 #define __get_user(x,ptr) \
155   __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
156 #define __put_user(x,ptr) \
157   __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
158
159 /*
160  * The "xxx_ret" versions return constant specified in third argument, if
161  * something bad happens. These macros can be optimized for the
162  * case of just returning from the function xxx_ret is used.
163  */
164
165 #define put_user_ret(x,ptr,ret) \
166         do { if (put_user(x,ptr)) return ret; } while (0)
167
168 #define get_user_ret(x,ptr,ret) \
169         do { if (get_user(x,ptr)) return ret; } while (0)
170
171 #define __put_user_ret(x,ptr,ret) \
172         do { if (__put_user(x,ptr)) return ret; } while (0)
173
174 #define __get_user_ret(x,ptr,ret) \
175         do { if (__get_user(x,ptr)) return ret; } while (0)
176
177
178 extern long __put_user_bad(void);
179
180 #define __put_user_nocheck(x,ptr,size)                  \
181 ({                                                      \
182         long __pu_err;                                  \
183         __put_user_size((x),(ptr),(size),__pu_err);     \
184         __pu_err;                                       \
185 })
186
187 #define __put_user_check(x,ptr,size)                            \
188 ({                                                              \
189         long __pu_err = -EFAULT;                                \
190         __typeof__(*(ptr)) *__pu_addr = (ptr);                  \
191         if (access_ok(VERIFY_WRITE,__pu_addr,size))             \
192                 __put_user_size((x),__pu_addr,(size),__pu_err); \
193         __pu_err;                                               \
194 })
195
196 #define __put_user_size(x,ptr,size,retval)                      \
197 do {                                                            \
198         retval = 0;                                             \
199         switch (size) {                                         \
200           case 1: __put_user_asm(x,ptr,retval,"move.b"); break; \
201           case 2: __put_user_asm(x,ptr,retval,"move.w"); break; \
202           case 4: __put_user_asm(x,ptr,retval,"move.d"); break; \
203           case 8: __put_user_asm_64(x,ptr,retval); break;       \
204           default: __put_user_bad();                            \
205         }                                                       \
206 } while (0)
207
208 struct __large_struct { unsigned long buf[100]; };
209 #define __m(x) (*(struct __large_struct *)(x))
210
211 /*
212  * We don't tell gcc that we are accessing memory, but this is OK
213  * because we do not write to any memory gcc knows about, so there
214  * are no aliasing issues.
215  *
216  * Note that PC at a fault is the address *after* the faulting
217  * instruction.
218  */
219 #define __put_user_asm(x, addr, err, op)                        \
220         __asm__ __volatile__(                                   \
221                 "       "op" %1,[%2]\n"                         \
222                 "2:\n"                                          \
223                 "       .section .fixup,\"ax\"\n"               \
224                 "3:     move.d %3,%0\n"                         \
225                 "       jump 2b\n"                              \
226                 "       .previous\n"                            \
227                 "       .section __ex_table,\"a\"\n"            \
228                 "       .dword 2b,3b\n"                         \
229                 "       .previous\n"                            \
230                 : "=r" (err)                                    \
231                 : "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
232
233 #define __put_user_asm_64(x, addr, err)                         \
234         __asm__ __volatile__(                                   \
235                 "       move.d %M1,[%2]\n"                      \
236                 "2:     move.d %H1,[%2+4]\n"                    \
237                 "4:\n"                                          \
238                 "       .section .fixup,\"ax\"\n"               \
239                 "3:     move.d %3,%0\n"                         \
240                 "       jump 4b\n"                              \
241                 "       .previous\n"                            \
242                 "       .section __ex_table,\"a\"\n"            \
243                 "       .dword 2b,3b\n"                         \
244                 "       .dword 4b,3b\n"                         \
245                 "       .previous\n"                            \
246                 : "=r" (err)                                    \
247                 : "r" (x), "r" (addr), "g" (-EFAULT), "0" (err))
248
249
250 #define __get_user_nocheck(x,ptr,size)                          \
251 ({                                                              \
252         long __gu_err, __gu_val;                                \
253         __get_user_size(__gu_val,(ptr),(size),__gu_err);        \
254         (x) = (__typeof__(*(ptr)))__gu_val;                     \
255         __gu_err;                                               \
256 })
257
258 #define __get_user_check(x,ptr,size)                                    \
259 ({                                                                      \
260         long __gu_err = -EFAULT, __gu_val = 0;                          \
261         const __typeof__(*(ptr)) *__gu_addr = (ptr);                    \
262         if (access_ok(VERIFY_READ,__gu_addr,size))                      \
263                 __get_user_size(__gu_val,__gu_addr,(size),__gu_err);    \
264         (x) = (__typeof__(*(ptr)))__gu_val;                             \
265         __gu_err;                                                       \
266 })
267
268 extern long __get_user_bad(void);
269
270 #define __get_user_size(x,ptr,size,retval)                      \
271 do {                                                            \
272         retval = 0;                                             \
273         switch (size) {                                         \
274           case 1: __get_user_asm(x,ptr,retval,"move.b"); break; \
275           case 2: __get_user_asm(x,ptr,retval,"move.w"); break; \
276           case 4: __get_user_asm(x,ptr,retval,"move.d"); break; \
277           case 8: __get_user_asm_64(x,ptr,retval); break;       \
278           default: (x) = __get_user_bad();                      \
279         }                                                       \
280 } while (0)
281
282 /* See comment before __put_user_asm.  */
283
284 #define __get_user_asm(x, addr, err, op)                \
285         __asm__ __volatile__(                           \
286                 "       "op" [%2],%1\n"                 \
287                 "2:\n"                                  \
288                 "       .section .fixup,\"ax\"\n"       \
289                 "3:     move.d %3,%0\n"                 \
290                 "       moveq 0,%1\n"                   \
291                 "       jump 2b\n"                      \
292                 "       .previous\n"                    \
293                 "       .section __ex_table,\"a\"\n"    \
294                 "       .dword 2b,3b\n"                 \
295                 "       .previous\n"                    \
296                 : "=r" (err), "=r" (x)                  \
297                 : "r" (addr), "g" (-EFAULT), "0" (err))
298
299 #define __get_user_asm_64(x, addr, err)                 \
300         __asm__ __volatile__(                           \
301                 "       move.d [%2],%M1\n"              \
302                 "2:     move.d [%2+4],%H1\n"            \
303                 "4:\n"                                  \
304                 "       .section .fixup,\"ax\"\n"       \
305                 "3:     move.d %3,%0\n"                 \
306                 "       moveq 0,%1\n"                   \
307                 "       jump 4b\n"                      \
308                 "       .previous\n"                    \
309                 "       .section __ex_table,\"a\"\n"    \
310                 "       .dword 2b,3b\n"                 \
311                 "       .dword 4b,3b\n"                 \
312                 "       .previous\n"                    \
313                 : "=r" (err), "=r" (x)                  \
314                 : "r" (addr), "g" (-EFAULT), "0" (err))
315
316 /* More complex functions.  Most are inline, but some call functions that
317    live in lib/usercopy.c  */
318
319 extern unsigned long __copy_user(void *to, const void *from, unsigned long n);
320 extern unsigned long __copy_user_zeroing(void *to, const void *from, unsigned long n);
321 extern unsigned long __do_clear_user(void *to, unsigned long n);
322
323 /*
324  * Copy a null terminated string from userspace.
325  *
326  * Must return:
327  * -EFAULT              for an exception
328  * count                if we hit the buffer limit
329  * bytes copied         if we hit a null byte
330  * (without the null byte)
331  */
332
333 extern inline long         
334 __do_strncpy_from_user(char *dst, const char *src, long count)
335 {
336         long res;
337
338         if (count == 0)
339                 return 0;
340
341         /*
342          * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
343          *  So do we.
344          *
345          *  This code is deduced from:
346          *
347          *      char tmp2;
348          *      long tmp1, tmp3 
349          *      tmp1 = count;
350          *      while ((*dst++ = (tmp2 = *src++)) != 0
351          *             && --tmp1)
352          *        ;
353          *
354          *      res = count - tmp1;
355          *
356          *  with tweaks.
357          */
358
359         __asm__ __volatile__ (
360                 "       move.d %3,%0\n"
361                 "       move.b [%2+],$r9\n"
362                 "1:     beq 2f\n"
363                 "       move.b $r9,[%1+]\n"
364
365                 "       subq 1,%0\n"
366                 "       bne 1b\n"
367                 "       move.b [%2+],$r9\n"
368
369                 "2:     sub.d %3,%0\n"
370                 "       neg.d %0,%0\n"
371                 "3:\n"
372                 "       .section .fixup,\"ax\"\n"
373                 "4:     move.d %7,%0\n"
374                 "       jump 3b\n"
375
376                 /* There's one address for a fault at the first move, and
377                    two possible PC values for a fault at the second move,
378                    being a delay-slot filler.  However, the branch-target
379                    for the second move is the same as the first address.
380                    Just so you don't get confused...  */
381                 "       .previous\n"
382                 "       .section __ex_table,\"a\"\n"
383                 "       .dword 1b,4b\n"
384                 "       .dword 2b,4b\n"
385                 "       .previous"
386                 : "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
387                 : "3" (count), "1" (dst), "2" (src), "g" (-EFAULT)
388                 : "r9");
389
390         return res;
391 }
392
393 extern inline unsigned long
394 __generic_copy_to_user(void *to, const void *from, unsigned long n)
395 {
396         if (access_ok(VERIFY_WRITE, to, n))
397                 return __copy_user(to,from,n);
398         return n;
399 }
400
401 extern inline unsigned long
402 __generic_copy_from_user(void *to, const void *from, unsigned long n)
403 {
404         if (access_ok(VERIFY_READ, from, n))
405                 return __copy_user_zeroing(to,from,n);
406         return n;
407 }
408
409 extern inline unsigned long
410 __generic_clear_user(void *to, unsigned long n)
411 {
412         if (access_ok(VERIFY_WRITE, to, n))
413                 return __do_clear_user(to,n);
414         return n;
415 }
416
417 extern inline long
418 __strncpy_from_user(char *dst, const char *src, long count)
419 {
420         return __do_strncpy_from_user(dst, src, count);
421 }
422
423 extern inline long
424 strncpy_from_user(char *dst, const char *src, long count)
425 {
426         long res = -EFAULT;
427         if (access_ok(VERIFY_READ, src, 1))
428                 res = __do_strncpy_from_user(dst, src, count);
429         return res;
430 }
431
432 /* A few copy asms to build up the more complex ones from.
433
434    Note again, a post-increment is performed regardless of whether a bus
435    fault occurred in that instruction, and PC for a faulted insn is the
436    address *after* the insn.  */
437
438 #define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
439         __asm__ __volatile__ (                          \
440                         COPY                            \
441                 "1:\n"                                  \
442                 "       .section .fixup,\"ax\"\n"       \
443                         FIXUP                           \
444                 "       jump 1b\n"                      \
445                 "       .previous\n"                    \
446                 "       .section __ex_table,\"a\"\n"    \
447                         TENTRY                          \
448                 "       .previous\n"                    \
449                 : "=r" (to), "=r" (from), "=r" (ret)    \
450                 : "0" (to), "1" (from), "2" (ret)       \
451                 : "r9", "memory")
452
453 #define __asm_copy_from_user_1(to, from, ret) \
454         __asm_copy_user_cont(to, from, ret,     \
455                 "       move.b [%1+],$r9\n"     \
456                 "2:     move.b $r9,[%0+]\n",    \
457                 "3:     addq 1,%2\n"            \
458                 "       clear.b [%0+]\n",       \
459                 "       .dword 2b,3b\n")
460
461 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
462         __asm_copy_user_cont(to, from, ret,             \
463                 "       move.w [%1+],$r9\n"             \
464                 "2:     move.w $r9,[%0+]\n" COPY,       \
465                 "3:     addq 2,%2\n"                    \
466                 "       clear.w [%0+]\n" FIXUP,         \
467                 "       .dword 2b,3b\n" TENTRY)
468
469 #define __asm_copy_from_user_2(to, from, ret) \
470         __asm_copy_from_user_2x_cont(to, from, ret, "", "", "")
471
472 #define __asm_copy_from_user_3(to, from, ret)           \
473         __asm_copy_from_user_2x_cont(to, from, ret,     \
474                 "       move.b [%1+],$r9\n"             \
475                 "4:     move.b $r9,[%0+]\n",            \
476                 "5:     addq 1,%2\n"                    \
477                 "       clear.b [%0+]\n",               \
478                 "       .dword 4b,5b\n")
479
480 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
481         __asm_copy_user_cont(to, from, ret,             \
482                 "       move.d [%1+],$r9\n"             \
483                 "2:     move.d $r9,[%0+]\n" COPY,       \
484                 "3:     addq 4,%2\n"                    \
485                 "       clear.d [%0+]\n" FIXUP,         \
486                 "       .dword 2b,3b\n" TENTRY)
487
488 #define __asm_copy_from_user_4(to, from, ret) \
489         __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
490
491 #define __asm_copy_from_user_5(to, from, ret) \
492         __asm_copy_from_user_4x_cont(to, from, ret,     \
493                 "       move.b [%1+],$r9\n"             \
494                 "4:     move.b $r9,[%0+]\n",            \
495                 "5:     addq 1,%2\n"                    \
496                 "       clear.b [%0+]\n",               \
497                 "       .dword 4b,5b\n")
498
499 #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
500         __asm_copy_from_user_4x_cont(to, from, ret,     \
501                 "       move.w [%1+],$r9\n"             \
502                 "4:     move.w $r9,[%0+]\n" COPY,       \
503                 "5:     addq 2,%2\n"                    \
504                 "       clear.w [%0+]\n" FIXUP,         \
505                 "       .dword 4b,5b\n" TENTRY)
506
507 #define __asm_copy_from_user_6(to, from, ret) \
508         __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
509
510 #define __asm_copy_from_user_7(to, from, ret) \
511         __asm_copy_from_user_6x_cont(to, from, ret,     \
512                 "       move.b [%1+],$r9\n"             \
513                 "6:     move.b $r9,[%0+]\n",            \
514                 "7:     addq 1,%2\n"                    \
515                 "       clear.b [%0+]\n",               \
516                 "       .dword 6b,7b\n")
517
518 #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
519         __asm_copy_from_user_4x_cont(to, from, ret,     \
520                 "       move.d [%1+],$r9\n"             \
521                 "4:     move.d $r9,[%0+]\n" COPY,       \
522                 "5:     addq 4,%2\n"                    \
523                 "       clear.d [%0+]\n" FIXUP,         \
524                 "       .dword 4b,5b\n" TENTRY)
525
526 #define __asm_copy_from_user_8(to, from, ret) \
527         __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
528
529 #define __asm_copy_from_user_9(to, from, ret) \
530         __asm_copy_from_user_8x_cont(to, from, ret,     \
531                 "       move.b [%1+],$r9\n"             \
532                 "6:     move.b $r9,[%0+]\n",            \
533                 "7:     addq 1,%2\n"                    \
534                 "       clear.b [%0+]\n",               \
535                 "       .dword 6b,7b\n")
536
537 #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
538         __asm_copy_from_user_8x_cont(to, from, ret,     \
539                 "       move.w [%1+],$r9\n"             \
540                 "6:     move.w $r9,[%0+]\n" COPY,       \
541                 "7:     addq 2,%2\n"                    \
542                 "       clear.w [%0+]\n" FIXUP,         \
543                 "       .dword 6b,7b\n" TENTRY)
544
545 #define __asm_copy_from_user_10(to, from, ret) \
546         __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
547
548 #define __asm_copy_from_user_11(to, from, ret)          \
549         __asm_copy_from_user_10x_cont(to, from, ret,    \
550                 "       move.b [%1+],$r9\n"             \
551                 "8:     move.b $r9,[%0+]\n",            \
552                 "9:     addq 1,%2\n"                    \
553                 "       clear.b [%0+]\n",               \
554                 "       .dword 8b,9b\n")
555
556 #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
557         __asm_copy_from_user_8x_cont(to, from, ret,     \
558                 "       move.d [%1+],$r9\n"             \
559                 "6:     move.d $r9,[%0+]\n" COPY,       \
560                 "7:     addq 4,%2\n"                    \
561                 "       clear.d [%0+]\n" FIXUP,         \
562                 "       .dword 6b,7b\n" TENTRY)
563
564 #define __asm_copy_from_user_12(to, from, ret) \
565         __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
566
567 #define __asm_copy_from_user_13(to, from, ret) \
568         __asm_copy_from_user_12x_cont(to, from, ret,    \
569                 "       move.b [%1+],$r9\n"             \
570                 "8:     move.b $r9,[%0+]\n",            \
571                 "9:     addq 1,%2\n"                    \
572                 "       clear.b [%0+]\n",               \
573                 "       .dword 8b,9b\n")
574
575 #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
576         __asm_copy_from_user_12x_cont(to, from, ret,    \
577                 "       move.w [%1+],$r9\n"             \
578                 "8:     move.w $r9,[%0+]\n" COPY,       \
579                 "9:     addq 2,%2\n"                    \
580                 "       clear.w [%0+]\n" FIXUP,         \
581                 "       .dword 8b,9b\n" TENTRY)
582
583 #define __asm_copy_from_user_14(to, from, ret) \
584         __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
585
586 #define __asm_copy_from_user_15(to, from, ret) \
587         __asm_copy_from_user_14x_cont(to, from, ret,    \
588                 "       move.b [%1+],$r9\n"             \
589                 "10:    move.b $r9,[%0+]\n",            \
590                 "11:    addq 1,%2\n"                    \
591                 "       clear.b [%0+]\n",               \
592                 "       .dword 10b,11b\n")
593
594 #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
595         __asm_copy_from_user_12x_cont(to, from, ret,    \
596                 "       move.d [%1+],$r9\n"             \
597                 "8:     move.d $r9,[%0+]\n" COPY,       \
598                 "9:     addq 4,%2\n"                    \
599                 "       clear.d [%0+]\n" FIXUP,         \
600                 "       .dword 8b,9b\n" TENTRY)
601
602 #define __asm_copy_from_user_16(to, from, ret) \
603         __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
604
605 #define __asm_copy_from_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
606         __asm_copy_from_user_16x_cont(to, from, ret,    \
607                 "       move.d [%1+],$r9\n"             \
608                 "10:    move.d $r9,[%0+]\n" COPY,       \
609                 "11:    addq 4,%2\n"                    \
610                 "       clear.d [%0+]\n" FIXUP,         \
611                 "       .dword 10b,11b\n" TENTRY)
612
613 #define __asm_copy_from_user_20(to, from, ret) \
614         __asm_copy_from_user_20x_cont(to, from, ret, "", "", "")
615
616 #define __asm_copy_from_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
617         __asm_copy_from_user_20x_cont(to, from, ret,    \
618                 "       move.d [%1+],$r9\n"             \
619                 "12:    move.d $r9,[%0+]\n" COPY,       \
620                 "13:    addq 4,%2\n"                    \
621                 "       clear.d [%0+]\n" FIXUP,         \
622                 "       .dword 12b,13b\n" TENTRY)
623
624 #define __asm_copy_from_user_24(to, from, ret) \
625         __asm_copy_from_user_24x_cont(to, from, ret, "", "", "")
626
627 /* And now, the to-user ones.  */
628
629 #define __asm_copy_to_user_1(to, from, ret)     \
630         __asm_copy_user_cont(to, from, ret,     \
631                 "       move.b [%1+],$r9\n"     \
632                 "       move.b $r9,[%0+]\n2:\n",        \
633                 "3:     addq 1,%2\n",           \
634                 "       .dword 2b,3b\n")
635
636 #define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
637         __asm_copy_user_cont(to, from, ret,             \
638                 "       move.w [%1+],$r9\n"             \
639                 "       move.w $r9,[%0+]\n2:\n" COPY,   \
640                 "3:     addq 2,%2\n" FIXUP,             \
641                 "       .dword 2b,3b\n" TENTRY)
642
643 #define __asm_copy_to_user_2(to, from, ret) \
644         __asm_copy_to_user_2x_cont(to, from, ret, "", "", "")
645
646 #define __asm_copy_to_user_3(to, from, ret) \
647         __asm_copy_to_user_2x_cont(to, from, ret,       \
648                 "       move.b [%1+],$r9\n"             \
649                 "       move.b $r9,[%0+]\n4:\n",                \
650                 "5:     addq 1,%2\n",                   \
651                 "       .dword 4b,5b\n")
652
653 #define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
654         __asm_copy_user_cont(to, from, ret,             \
655                 "       move.d [%1+],$r9\n"             \
656                 "       move.d $r9,[%0+]\n2:\n" COPY,   \
657                 "3:     addq 4,%2\n" FIXUP,             \
658                 "       .dword 2b,3b\n" TENTRY)
659
/* Fixed-size copy-to-user helpers.  __asm_copy_to_user_N copies exactly
   N bytes from kernel space (%1) to user space (%0) via $r9.  Each
   "*x_cont" macro chains one more move onto the previous size, threading
   three strings through to the base asm frame:
     COPY   - the move insns, each followed by a fresh local label
              (label numbers grow by two per chained move),
     FIXUP  - a .fixup entry per move that adds that move's byte count
              to the not-copied counter %2; entries fall through into
              each other, so a fault at move K accumulates the sizes of
              move K and everything after it, i.e. the bytes NOT copied,
     TENTRY - the matching __ex_table (fault-PC, fixup-PC) pairs.  */

#define __asm_copy_to_user_4(to, from, ret) \
        __asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       move.b [%1+],$r9\n"             \
                "       move.b $r9,[%0+]\n4:\n",                \
                "5:     addq 1,%2\n",                   \
                "       .dword 4b,5b\n")

/* 4 bytes + a word, then optionally more (COPY/FIXUP/TENTRY).  */
#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       move.w [%1+],$r9\n"             \
                "       move.w $r9,[%0+]\n4:\n" COPY,   \
                "5:     addq 2,%2\n" FIXUP,             \
                "       .dword 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
        __asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret) \
        __asm_copy_to_user_6x_cont(to, from, ret,       \
                "       move.b [%1+],$r9\n"             \
                "       move.b $r9,[%0+]\n6:\n",                \
                "7:     addq 1,%2\n",                   \
                "       .dword 6b,7b\n")

/* 4 bytes + a dword; note this chains off 4x_cont, not 6x_cont, so the
   trailing-word and trailing-dword families stay independent.  */
#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       move.d [%1+],$r9\n"             \
                "       move.d $r9,[%0+]\n4:\n" COPY,   \
                "5:     addq 4,%2\n"  FIXUP,            \
                "       .dword 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
        __asm_copy_to_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_9(to, from, ret) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       move.b [%1+],$r9\n"             \
                "       move.b $r9,[%0+]\n6:\n",                \
                "7:     addq 1,%2\n",                   \
                "       .dword 6b,7b\n")

#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       move.w [%1+],$r9\n"             \
                "       move.w $r9,[%0+]\n6:\n" COPY,   \
                "7:     addq 2,%2\n" FIXUP,             \
                "       .dword 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
        __asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret) \
        __asm_copy_to_user_10x_cont(to, from, ret,      \
                "       move.b [%1+],$r9\n"             \
                "       move.b $r9,[%0+]\n8:\n",                \
                "9:     addq 1,%2\n",                   \
                "       .dword 8b,9b\n")

#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       move.d [%1+],$r9\n"             \
                "       move.d $r9,[%0+]\n6:\n" COPY,   \
                "7:     addq 4,%2\n" FIXUP,             \
                "       .dword 6b,7b\n" TENTRY)

#define __asm_copy_to_user_12(to, from, ret) \
        __asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       move.b [%1+],$r9\n"             \
                "       move.b $r9,[%0+]\n8:\n",                \
                "9:     addq 1,%2\n",                   \
                "       .dword 8b,9b\n")

#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       move.w [%1+],$r9\n"             \
                "       move.w $r9,[%0+]\n8:\n" COPY,   \
                "9:     addq 2,%2\n" FIXUP,             \
                "       .dword 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret)    \
        __asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret) \
        __asm_copy_to_user_14x_cont(to, from, ret,      \
                "       move.b [%1+],$r9\n"             \
                "       move.b $r9,[%0+]\n10:\n",               \
                "11:    addq 1,%2\n",                   \
                "       .dword 10b,11b\n")

#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       move.d [%1+],$r9\n"             \
                "       move.d $r9,[%0+]\n8:\n" COPY,   \
                "9:     addq 4,%2\n" FIXUP,             \
                "       .dword 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
        __asm_copy_to_user_16x_cont(to, from, ret, "", "", "")

/* Beyond 16 only dword-multiple sizes (20, 24) are provided; see
   __constant_copy_to_user below.  */
#define __asm_copy_to_user_20x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_16x_cont(to, from, ret,      \
                "       move.d [%1+],$r9\n"             \
                "       move.d $r9,[%0+]\n10:\n" COPY,  \
                "11:    addq 4,%2\n" FIXUP,             \
                "       .dword 10b,11b\n" TENTRY)

#define __asm_copy_to_user_20(to, from, ret) \
        __asm_copy_to_user_20x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_24x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_20x_cont(to, from, ret,      \
                "       move.d [%1+],$r9\n"             \
                "       move.d $r9,[%0+]\n12:\n" COPY,  \
                "13:    addq 4,%2\n" FIXUP,             \
                "       .dword 12b,13b\n" TENTRY)

#define __asm_copy_to_user_24(to, from, ret)    \
        __asm_copy_to_user_24x_cont(to, from, ret, "", "", "")

785 /* Define a few clearing asms with exception handlers.  */
786
787 /* This frame-asm is like the __asm_copy_user_cont one, but has one less
788    input.  */
789
/* Base frame for the fixed-size clearing asms.  CLEAR holds the
   clear.[bwd] insns (each tagged with a local label), FIXUP the .fixup
   entries that add the still-uncleared byte counts to ret, and TENTRY
   the matching __ex_table (fault-PC, fixup-PC) pairs.  On a fault the
   fixup code runs and jumps back to label 1 so the asm always "returns".
   ($r9 is listed as clobbered for uniformity with the copy frames even
   though the CLEAR sequences below don't use it.)  */
#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm__ __volatile__ (                          \
                        CLEAR                           \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                        FIXUP                           \
                "       jump 1b\n"                      \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                        TENTRY                          \
                "       .previous"                      \
                : "=r" (to), "=r" (ret)                 \
                : "0" (to), "1" (ret)                   \
                : "r9", "memory")
804
/* Fixed-size user-space clearing helpers, built like the copy helpers
   above: each "*x_cont" macro chains one more clear.d onto the previous
   size.  Fixup entries fall through into each other (see __asm_clear_3:
   a fault at the word-clear hits label 4, adds 2, then falls into label
   5 and adds 1 more), so ret ends up incremented by the number of bytes
   that were NOT cleared.  */

#define __asm_clear_1(to, ret) \
        __asm_clear(to, ret,                    \
                "       clear.b [%0+]\n2:\n",   \
                "3:     addq 1,%1\n",           \
                "       .dword 2b,3b\n")

#define __asm_clear_2(to, ret) \
        __asm_clear(to, ret,                    \
                "       clear.w [%0+]\n2:\n",   \
                "3:     addq 2,%1\n",           \
                "       .dword 2b,3b\n")

#define __asm_clear_3(to, ret) \
     __asm_clear(to, ret,                       \
                 "      clear.w [%0+]\n"        \
                 "2:    clear.b [%0+]\n3:\n",   \
                 "4:    addq 2,%1\n"            \
                 "5:    addq 1,%1\n",           \
                 "      .dword 2b,4b\n"         \
                 "      .dword 3b,5b\n")

#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear(to, ret,                            \
                "       clear.d [%0+]\n2:\n" CLEAR,     \
                "3:     addq 4,%1\n" FIXUP,             \
                "       .dword 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
        __asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_4x_cont(to, ret,                    \
                "       clear.d [%0+]\n4:\n" CLEAR,     \
                "5:     addq 4,%1\n" FIXUP,             \
                "       .dword 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
        __asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_8x_cont(to, ret,                    \
                "       clear.d [%0+]\n6:\n" CLEAR,     \
                "7:     addq 4,%1\n" FIXUP,             \
                "       .dword 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
        __asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_12x_cont(to, ret,                   \
                "       clear.d [%0+]\n8:\n" CLEAR,     \
                "9:     addq 4,%1\n" FIXUP,             \
                "       .dword 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
        __asm_clear_16x_cont(to, ret, "", "", "")

#define __asm_clear_20x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_16x_cont(to, ret,                   \
                "       clear.d [%0+]\n10:\n" CLEAR,    \
                "11:    addq 4,%1\n" FIXUP,             \
                "       .dword 10b,11b\n" TENTRY)

#define __asm_clear_20(to, ret) \
        __asm_clear_20x_cont(to, ret, "", "", "")

#define __asm_clear_24x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_20x_cont(to, ret,                   \
                "       clear.d [%0+]\n12:\n" CLEAR,    \
                "13:    addq 4,%1\n" FIXUP,             \
                "       .dword 12b,13b\n" TENTRY)

#define __asm_clear_24(to, ret) \
        __asm_clear_24x_cont(to, ret, "", "", "")
879
/* Note that these expand awfully if made into switch constructs, so
   don't do that.  */
882
/* Copy a compile-time-constant number of bytes from user space.
 * Sizes 0..16, 20 and 24 expand inline via the fixed-size asm helpers;
 * any other size falls back to the out-of-line generic routine.
 * Returns the number of bytes that could not be copied (0 on success);
 * the __asm_copy_from_user_N helpers accumulate the uncopied count
 * into `ret' through their fixup code.
 * Deliberately an if-else chain, not a switch (see comment above).
 */
extern inline unsigned long
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
        unsigned long ret = 0;
        if (n == 0)
                ;
        else if (n == 1)
                __asm_copy_from_user_1(to, from, ret);
        else if (n == 2)
                __asm_copy_from_user_2(to, from, ret);
        else if (n == 3)
                __asm_copy_from_user_3(to, from, ret);
        else if (n == 4)
                __asm_copy_from_user_4(to, from, ret);
        else if (n == 5)
                __asm_copy_from_user_5(to, from, ret);
        else if (n == 6)
                __asm_copy_from_user_6(to, from, ret);
        else if (n == 7)
                __asm_copy_from_user_7(to, from, ret);
        else if (n == 8)
                __asm_copy_from_user_8(to, from, ret);
        else if (n == 9)
                __asm_copy_from_user_9(to, from, ret);
        else if (n == 10)
                __asm_copy_from_user_10(to, from, ret);
        else if (n == 11)
                __asm_copy_from_user_11(to, from, ret);
        else if (n == 12)
                __asm_copy_from_user_12(to, from, ret);
        else if (n == 13)
                __asm_copy_from_user_13(to, from, ret);
        else if (n == 14)
                __asm_copy_from_user_14(to, from, ret);
        else if (n == 15)
                __asm_copy_from_user_15(to, from, ret);
        else if (n == 16)
                __asm_copy_from_user_16(to, from, ret);
        else if (n == 20)
                __asm_copy_from_user_20(to, from, ret);
        else if (n == 24)
                __asm_copy_from_user_24(to, from, ret);
        else
                ret = __generic_copy_from_user(to, from, n);

        return ret;
}
930
931 /* Ditto, don't make a switch out of this.  */
932
/* Copy a compile-time-constant number of bytes to user space.
 * Mirror image of __constant_copy_from_user: sizes 0..16, 20 and 24
 * expand inline, everything else goes through the generic routine.
 * Returns the number of bytes that could not be copied (0 on success).
 * Deliberately an if-else chain, not a switch (see comment above).
 */
extern inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
{
        unsigned long ret = 0;
        if (n == 0)
                ;
        else if (n == 1)
                __asm_copy_to_user_1(to, from, ret);
        else if (n == 2)
                __asm_copy_to_user_2(to, from, ret);
        else if (n == 3)
                __asm_copy_to_user_3(to, from, ret);
        else if (n == 4)
                __asm_copy_to_user_4(to, from, ret);
        else if (n == 5)
                __asm_copy_to_user_5(to, from, ret);
        else if (n == 6)
                __asm_copy_to_user_6(to, from, ret);
        else if (n == 7)
                __asm_copy_to_user_7(to, from, ret);
        else if (n == 8)
                __asm_copy_to_user_8(to, from, ret);
        else if (n == 9)
                __asm_copy_to_user_9(to, from, ret);
        else if (n == 10)
                __asm_copy_to_user_10(to, from, ret);
        else if (n == 11)
                __asm_copy_to_user_11(to, from, ret);
        else if (n == 12)
                __asm_copy_to_user_12(to, from, ret);
        else if (n == 13)
                __asm_copy_to_user_13(to, from, ret);
        else if (n == 14)
                __asm_copy_to_user_14(to, from, ret);
        else if (n == 15)
                __asm_copy_to_user_15(to, from, ret);
        else if (n == 16)
                __asm_copy_to_user_16(to, from, ret);
        else if (n == 20)
                __asm_copy_to_user_20(to, from, ret);
        else if (n == 24)
                __asm_copy_to_user_24(to, from, ret);
        else
                ret = __generic_copy_to_user(to, from, n);

        return ret;
}
980
981 /* No switch, please.  */
982
/* Clear a compile-time-constant number of bytes in user space.
 * Only sizes 0..4 and dword multiples up to 24 have inline expansions
 * (the only __asm_clear_N helpers defined above); other sizes use the
 * generic routine.  Returns the number of bytes that could not be
 * cleared (0 on success).  If-else chain on purpose; see above.
 */
extern inline unsigned long
__constant_clear_user(void *to, unsigned long n)
{
        unsigned long ret = 0;
        if (n == 0)
                ;
        else if (n == 1)
                __asm_clear_1(to, ret);
        else if (n == 2)
                __asm_clear_2(to, ret);
        else if (n == 3)
                __asm_clear_3(to, ret);
        else if (n == 4)
                __asm_clear_4(to, ret);
        else if (n == 8)
                __asm_clear_8(to, ret);
        else if (n == 12)
                __asm_clear_12(to, ret);
        else if (n == 16)
                __asm_clear_16(to, ret);
        else if (n == 20)
                __asm_clear_20(to, ret);
        else if (n == 24)
                __asm_clear_24(to, ret);
        else
                ret = __generic_clear_user(to, n);

        return ret;
}
1012
1013
/* Public checked user-access API.  When the length is a compile-time
   constant, dispatch to the inline fixed-size versions above; otherwise
   call the generic routines.  Each returns the number of bytes left
   uncopied/uncleared (0 on success).  */

#define clear_user(to, n)                       \
(__builtin_constant_p(n) ?                      \
 __constant_clear_user(to, n) :                 \
 __generic_clear_user(to, n))

#define copy_from_user(to, from, n)             \
(__builtin_constant_p(n) ?                      \
 __constant_copy_from_user(to, from, n) :       \
 __generic_copy_from_user(to, from, n))

#define copy_to_user(to, from, n)               \
(__builtin_constant_p(n) ?                      \
 __constant_copy_to_user(to, from, n) :         \
 __generic_copy_to_user(to, from, n))

/* Convenience wrappers: return `retval' from the enclosing function if
   the copy left any bytes uncopied.  */
#define copy_to_user_ret(to,from,n,retval) \
        do { if (copy_to_user(to,from,n)) return retval; } while (0)
#define copy_from_user_ret(to,from,n,retval) \
        do { if (copy_from_user(to,from,n)) return retval; } while (0)
1033
1034 /* We let the __ versions of copy_from/to_user inline, because they're often
1035  * used in fast paths and have only a small space overhead.
1036  */
1037
/* copy_from_user without any access check; the caller must already have
 * validated the range.  Returns the number of bytes not copied.  The
 * helper's name suggests the kernel destination is zero-filled on a
 * fault -- TODO(review): confirm against __copy_user_zeroing's
 * definition elsewhere in the tree.
 */
extern inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
        return __copy_user_zeroing(to,from,n);
}
1043
/* copy_to_user without any access check; the caller must already have
 * validated the range.  Returns the number of bytes not copied.
 */
extern inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
        return __copy_user(to,from,n);
}
1049
/* clear_user without any access check; the caller must already have
 * validated the range.  Returns the number of bytes not cleared.
 */
extern inline unsigned long
__generic_clear_user_nocheck(void *to, unsigned long n)
{
        return __do_clear_user(to,n);
}
1055
/* Unchecked ("__"-prefixed) variants -- no access_ok() verification is
   done here; only use these when the range is already known valid.  */

#define __copy_to_user(to,from,n)   __generic_copy_to_user_nocheck((to),(from),(n))
#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
#define __clear_user(to,n) __generic_clear_user_nocheck((to),(n))
1061
/*
 * Return the length of a string in userspace, including the
 * terminating 0, or 0 on error.  Returns a value greater than n
 * if the string is longer than n bytes.
 */
1068
/* strnlen_user - length of a user-space string including the
 * terminating NUL, capped by n.
 * @s: user pointer to the string.
 * @n: maximum number of bytes to examine.
 *
 * Returns 0 if the initial access check or a fault during the scan
 * fails; otherwise the NUL-inclusive length (a value > n means no NUL
 * was found within n bytes).
 */
extern inline long
strnlen_user(const char *s, long n)
{
        long res, tmp1;

        /* Size-0 check: only the start address is validated here; the
           scan itself is covered by the __ex_table entries below.  */
        if (!access_ok(VERIFY_READ, s, 0))
                return 0;

        /*
         * This code is deduced from:
         *
         *      tmp1 = n;
         *      while (tmp1-- > 0 && *s++)
         *        ;
         *
         *      res = n - tmp1;
         *
         *  (with tweaks).
         */

        /* %0 starts as s and is post-incremented by test.b; $r9 counts
           down from n.  The subq and test.d sit in branch delay slots.
           A fault at either faulting PC lands on label 3, which clears
           the result (error => 0) and resumes after the asm.  */
        __asm__ __volatile__ (
                "       move.d %1,$r9\n"
                "0:\n"
                "       ble 1f\n"
                "       subq 1,$r9\n"

                "       test.b [%0+]\n"
                "       bne 0b\n"
                "       test.d $r9\n"
                "1:\n"
                "       move.d %1,%0\n"
                "       sub.d $r9,%0\n"
                "2:\n"
                "       .section .fixup,\"ax\"\n"

                "3:     clear.d %0\n"
                "       jump 2b\n"

                /* There's one address for a fault at the first move, and
                   two possible PC values for a fault at the second move,
                   being a delay-slot filler.  However, the branch-target
                   for the second move is the same as the first address.
                   Just so you don't get confused...  */
                "       .previous\n"
                "       .section __ex_table,\"a\"\n"
                "       .dword 0b,3b\n"
                "       .dword 1b,3b\n"
                "       .previous\n"
                : "=r" (res), "=r" (tmp1)
                : "0" (s), "1" (n)
                : "r9");

        return res;
}
1123
1124 #define strlen_user(str)        strnlen_user((str), 0x7ffffffe)
1125
1126 #endif  /* __ASSEMBLY__ */
1127
1128 #endif  /* _CRIS_UACCESS_H */