import of upstream 2.4.34.4 from kernel.org
linux-2.4.git: arch/m68k/kernel/sys_m68k.c
/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/ipc.h>
#include <asm/page.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long * fildes)
{
        int fd[2];
        int error;

        error = do_pipe(fd);
        if (!error) {
                if (copy_to_user(fildes, fd, 2*sizeof(int)))
                        error = -EFAULT;
        }
        return error;
}

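/*
 * A user-space sketch (illustrative, not part of this file): on m68k the
 * two descriptors are written back through the pointer argument rather
 * than being returned in registers, so a pipe() wrapper ends up doing
 * roughly
 *
 *   int fds[2];
 *   if (pipe(fds) == 0)
 *           ... fds[0] is the read end, fds[1] the write end ...
 *
 * with sys_pipe() above filling in fds via copy_to_user().
 */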
/* common code for old and new mmaps */
static inline long do_mmap2(
        unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long pgoff)
{
        int error = -EBADF;
        struct file * file = NULL;

        flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
        if (!(flags & MAP_ANONYMOUS)) {
                file = fget(fd);
                if (!file)
                        goto out;
        }

        down_write(&current->mm->mmap_sem);
        error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
        up_write(&current->mm->mmap_sem);

        if (file)
                fput(file);
out:
        return error;
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags,
        unsigned long fd, unsigned long pgoff)
{
        return do_mmap2(addr, len, prot, flags, fd, pgoff);
}

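/*
 * Note (descriptive only): sys_mmap2() takes the file offset already
 * expressed in pages rather than bytes, so a 32-bit 'pgoff' can address
 * file offsets well beyond 4 GB; old_mmap() below still takes a byte
 * offset and therefore keeps the old limit.
 */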
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing..
 */

struct mmap_arg_struct {
        unsigned long addr;
        unsigned long len;
        unsigned long prot;
        unsigned long flags;
        unsigned long fd;
        unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct *arg)
{
        struct mmap_arg_struct a;
        int error = -EFAULT;

        if (copy_from_user(&a, arg, sizeof(a)))
                goto out;

        error = -EINVAL;
        if (a.offset & ~PAGE_MASK)
                goto out;

        a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

        error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
        return error;
}

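/*
 * Illustrative call from user space (hypothetical, not part of this
 * file): the six mmap() arguments are packed into one block and only a
 * pointer to it is passed to the kernel, e.g.
 *
 *   struct mmap_arg_struct a = {
 *           .addr = 0, .len = 8192,
 *           .prot = PROT_READ | PROT_WRITE,
 *           .flags = MAP_PRIVATE | MAP_ANONYMOUS,
 *           .fd = -1, .offset = 0,
 *   };
 *   ret = syscall(__NR_mmap, &a);
 *
 * old_mmap() copies the block in, insists on a page-aligned byte offset
 * and converts it to a page offset for do_mmap2().
 */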
#if 0
struct mmap_arg_struct64 {
        __u32 addr;
        __u32 len;
        __u32 prot;
        __u32 flags;
        __u64 offset; /* 64 bits */
        __u32 fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
        int error = -EFAULT;
        struct file * file = NULL;
        struct mmap_arg_struct64 a;
        unsigned long pgoff;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;

        if ((long)a.offset & ~PAGE_MASK)
                return -EINVAL;

        pgoff = a.offset >> PAGE_SHIFT;
        if ((a.offset >> PAGE_SHIFT) != pgoff)
                return -EINVAL;

        if (!(a.flags & MAP_ANONYMOUS)) {
                error = -EBADF;
                file = fget(a.fd);
                if (!file)
                        goto out;
        }
        a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

        down_write(&current->mm->mmap_sem);
        error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
        up_write(&current->mm->mmap_sem);
        if (file)
                fput(file);
out:
        return error;
}
#endif

extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);

struct sel_arg_struct {
        unsigned long n;
        fd_set *inp, *outp, *exp;
        struct timeval *tvp;
};

asmlinkage int old_select(struct sel_arg_struct *arg)
{
        struct sel_arg_struct a;

        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
        /* sys_select() does the appropriate kernel locking */
        return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}

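/*
 * old_select() uses the same convention as old_mmap(): the five select()
 * arguments arrive packed in a sel_arg_struct, the kernel copies the
 * block in with copy_from_user() and hands the fields straight to
 * sys_select().
 */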
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
                        int third, void *ptr, long fifth)
{
        int version, ret;

        version = call >> 16; /* hack for backward compatibility */
        call &= 0xffff;

        if (call <= SEMCTL)
                switch (call) {
                case SEMOP:
                        return sys_semop (first, (struct sembuf *)ptr, second);
                case SEMGET:
                        return sys_semget (first, second, third);
                case SEMCTL: {
                        union semun fourth;
                        if (!ptr)
                                return -EINVAL;
                        if (get_user(fourth.__pad, (void **) ptr))
                                return -EFAULT;
                        return sys_semctl (first, second, third, fourth);
                        }
                default:
                        return -EINVAL;
                }
        if (call <= MSGCTL)
                switch (call) {
                case MSGSND:
                        return sys_msgsnd (first, (struct msgbuf *) ptr,
                                          second, third);
                case MSGRCV:
                        switch (version) {
                        case 0: {
                                struct ipc_kludge tmp;
                                if (!ptr)
                                        return -EINVAL;
                                if (copy_from_user (&tmp,
                                                    (struct ipc_kludge *)ptr,
                                                    sizeof (tmp)))
                                        return -EFAULT;
                                return sys_msgrcv (first, tmp.msgp, second,
                                                   tmp.msgtyp, third);
                                }
                        default:
                                return sys_msgrcv (first,
                                                   (struct msgbuf *) ptr,
                                                   second, fifth, third);
                        }
                case MSGGET:
                        return sys_msgget ((key_t) first, second);
                case MSGCTL:
                        return sys_msgctl (first, second,
                                           (struct msqid_ds *) ptr);
                default:
                        return -EINVAL;
                }
        if (call <= SHMCTL)
                switch (call) {
                case SHMAT:
                        switch (version) {
                        default: {
                                ulong raddr;
                                ret = sys_shmat (first, (char *) ptr,
                                                 second, &raddr);
                                if (ret)
                                        return ret;
                                return put_user (raddr, (ulong *) third);
                        }
                        }
                case SHMDT:
                        return sys_shmdt ((char *)ptr);
                case SHMGET:
                        return sys_shmget (first, second, third);
                case SHMCTL:
                        return sys_shmctl (first, second,
                                           (struct shmid_ds *) ptr);
                default:
                        return -EINVAL;
                }

        return -EINVAL;
}

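/*
 * Dispatch sketch (user-space view, illustrative only): the SysV IPC
 * library routines all funnel through this one entry point, e.g. a
 * semop(semid, sops, nsops) wrapper issues roughly
 *
 *   sys_ipc(SEMOP, semid, nsops, 0, sops, 0);
 *
 * The low 16 bits of 'call' select the operation; the high 16 bits carry
 * the interface version consulted by the MSGRCV and SHMAT cases.
 */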
asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int on)
{
  return -ENOSYS;
}


/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)                                         \
({                                                                      \
  unsigned long _mmusr, _paddr;                                         \
                                                                        \
  __asm__ __volatile__ (".chip 68040\n\t"                               \
                        "ptestr (%1)\n\t"                               \
                        "movec %%mmusr,%0\n\t"                          \
                        ".chip 68k"                                     \
                        : "=r" (_mmusr)                                 \
                        : "a" (vaddr));                                 \
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;             \
  _paddr;                                                               \
})

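/*
 * How the macro above works: ptestr probes the user address through the
 * '040 MMU and the result is read back from %mmusr.  If the resident bit
 * (MMU_R_040) is set, the page-frame bits of %mmusr give the physical
 * page address; otherwise the macro yields 0 to mean "not mapped".  Only
 * the page address is produced, so callers add the offset within the
 * page themselves.
 */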
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          /* This nop is needed for some broken versions of the 68040.  */
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
        paddr += addr & ~(PAGE_MASK | 15);
        len = (len + (addr & 15) + 15) >> 4;
      } else {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_040(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
        len = (len + 15) >> 4;
      }
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * No need to page align here since it is done by
               * virt_to_phys_040().
               */
              addr += PAGE_SIZE;
              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary. */
              for (;;)
                {
                  if ((paddr = virt_to_phys_040(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_040(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}

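/*
 * Structure of the routine above, for reference: ALL scope is a single
 * cpusha; PAGE scope issues one cpushp per page that is actually mapped;
 * LINE scope walks 16-byte cache lines with cpushl, re-running
 * virt_to_phys_040() each time the walk crosses a page boundary and
 * skipping over pages that are not mapped.
 */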
#define virt_to_phys_060(vaddr)                         \
({                                                      \
  unsigned long paddr;                                  \
  __asm__ __volatile__ (".chip 68060\n\t"               \
                        "plpar (%0)\n\t"                \
                        ".chip 68k"                     \
                        : "=a" (paddr)                  \
                        : "0" (vaddr));                 \
  (paddr); /* XXX */                                    \
})

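/*
 * plpar translates the logical address in place, so the '060 variant
 * produces a complete physical address (page frame plus offset) and its
 * callers, unlike those of virt_to_phys_040(), add no separate offset.
 */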
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * 68060 manual says:
   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
   *  cpush %ic : invalidate IC
   *  cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;

    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_060(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
      }
      len = (len + 15) >> 4;
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {

              /*
               * We just want to jump to the first cache line
               * in the next page.
               */
              addr += PAGE_SIZE;
              addr &= PAGE_MASK;

              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary. */
              for (;;)
                {
                  if ((paddr = virt_to_phys_060(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;

    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;        /* Workaround for bug in some
                                   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_060(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
        struct vm_area_struct *vma;
        int ret = -EINVAL;

        lock_kernel();
        if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
            cache & ~FLUSH_CACHE_BOTH)
                goto out;

        if (scope == FLUSH_SCOPE_ALL) {
                /* Only the superuser may explicitly flush the whole cache. */
                ret = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out;
        } else {
                /*
                 * Verify that the specified address region actually belongs
                 * to this process.
                 */
                vma = find_vma (current->mm, addr);
                ret = -EINVAL;
                /* Check for overflow.  */
                if (addr + len < addr)
                        goto out;
                if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
                        goto out;
        }

        if (CPU_IS_020_OR_030) {
                if (scope == FLUSH_SCOPE_LINE && len < 256) {
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 4;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x400;
                        len >>= 2;
                        while (len--) {
                                __asm__ __volatile__ ("movec %1, %%caar\n\t"
                                                      "movec %0, %%cacr"
                                                      : /* no outputs */
                                                      : "r" (cacr), "r" (addr));
                                addr += 4;
                        }
                } else {
                        /* Flush the whole cache, even if page granularity requested. */
                        unsigned long cacr;
                        __asm__ ("movec %%cacr, %0" : "=r" (cacr));
                        if (cache & FLUSH_CACHE_INSN)
                                cacr |= 8;
                        if (cache & FLUSH_CACHE_DATA)
                                cacr |= 0x800;
                        __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
                }
                ret = 0;
                goto out;
        } else {
            /*
             * 040 or 060: don't blindly trust 'scope', someone could
             * try to flush a few megs of memory.
             */

            if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
                scope=FLUSH_SCOPE_PAGE;
            if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
                scope=FLUSH_SCOPE_ALL;
            if (CPU_IS_040) {
                ret = cache_flush_040 (addr, scope, cache, len);
            } else if (CPU_IS_060) {
                ret = cache_flush_060 (addr, scope, cache, len);
            }
        }
out:
        unlock_kernel();
        return ret;
}

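/*
 * Illustrative user-space use (hypothetical, not part of this file):
 * after generating or patching code at runtime, a program pushes the new
 * instructions out of the data cache and invalidates the stale
 * instruction-cache lines with something like
 *
 *   syscall(__NR_cacheflush, addr, FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, len);
 *
 * Unprivileged callers may only flush ranges inside their own mappings;
 * FLUSH_SCOPE_ALL is restricted to CAP_SYS_ADMIN above.
 */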
asmlinkage int sys_getpagesize(void)
{
        return PAGE_SIZE;
}

/*
 * Old cruft
 */
asmlinkage int sys_pause(void)
{
        current->state = TASK_INTERRUPTIBLE;
        schedule();
        return -ERESTARTNOHAND;
}