/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/ipc.h>
#include <asm/page.h>
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
asmlinkage int sys_pipe(unsigned long * fildes)
{
    int fd[2];
    int error;

    error = do_pipe(fd);
    if (!error) {
        if (copy_to_user(fildes, fd, 2*sizeof(int)))
            error = -EFAULT;
    }
    return error;
}
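
/*
 * Illustrative only (not part of this file): a minimal userspace sketch
 * of the calling convention described above -- both descriptors come
 * back through the single `fildes` pointer in one copy_to_user().
 */
#if 0
#include <unistd.h>

int pipe_example(void)
{
    int fd[2];

    if (pipe(fd) < 0)       /* libc passes &fd[0]; the kernel fills both slots */
        return -1;
    write(fd[1], "x", 1);   /* fd[1] is the write end, fd[0] the read end */
    return fd[0];
}
#endif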
/* common code for old and new mmaps */
static inline long do_mmap2(
    unsigned long addr, unsigned long len,
    unsigned long prot, unsigned long flags,
    unsigned long fd, unsigned long pgoff)
{
    int error = -EBADF;
    struct file * file = NULL;

    flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
    if (!(flags & MAP_ANONYMOUS)) {
        file = fget(fd);
        if (!file)
            goto out;
    }

    down_write(&current->mm->mmap_sem);
    error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
    up_write(&current->mm->mmap_sem);

    if (file)
        fput(file);
out:
    return error;
}
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
    unsigned long prot, unsigned long flags,
    unsigned long fd, unsigned long pgoff)
{
    return do_mmap2(addr, len, prot, flags, fd, pgoff);
}
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
 * handle more than 4 system call parameters, so these system calls
 * used a memory block for parameter passing.
 */

struct mmap_arg_struct {
    unsigned long addr;
    unsigned long len;
    unsigned long prot;
    unsigned long flags;
    unsigned long fd;
    unsigned long offset;
};

asmlinkage int old_mmap(struct mmap_arg_struct *arg)
{
    struct mmap_arg_struct a;
    int error = -EFAULT;

    if (copy_from_user(&a, arg, sizeof(a)))
        goto out;

    error = -EINVAL;
    if (a.offset & ~PAGE_MASK)  /* offset must be page-aligned */
        goto out;

    a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

    error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
out:
    return error;
}
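
/*
 * Illustrative only (not part of this file): a hypothetical userspace
 * sketch of the memory-block convention described above.  All six mmap()
 * arguments are packed into one struct and only its address crosses the
 * syscall boundary; the syscall number used here is an assumption.
 */
#if 0
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

void *old_mmap_example(void)
{
    struct mmap_arg_struct a = {
        .addr   = 0,
        .len    = 8192,
        .prot   = PROT_READ | PROT_WRITE,
        .flags  = MAP_PRIVATE | MAP_ANONYMOUS,
        .fd     = (unsigned long)-1,
        .offset = 0,
    };
    /* 90 is __NR_mmap on Linux/m68k (assumption -- check unistd.h) */
    return (void *)syscall(90, &a);
}
#endif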
struct mmap_arg_struct64 {
    __u32 addr;
    __u32 len;
    __u32 prot;
    __u32 flags;
    __u64 offset;   /* 64 bits */
    __u32 fd;
};

asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
{
    unsigned long pgoff;
    int error = -EFAULT;
    struct file * file = NULL;
    struct mmap_arg_struct64 a;

    if (copy_from_user(&a, arg, sizeof(a)))
        return -EFAULT;

    if ((long)a.offset & ~PAGE_MASK)    /* offset must be page-aligned */
        return -EINVAL;

    pgoff = a.offset >> PAGE_SHIFT;
    if ((a.offset >> PAGE_SHIFT) != pgoff)  /* page number must fit in unsigned long */
        return -EINVAL;

    if (!(a.flags & MAP_ANONYMOUS)) {
        error = -EBADF;
        file = fget(a.fd);
        if (!file)
            goto out;
    }
    a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

    down_write(&current->mm->mmap_sem);
    error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
    up_write(&current->mm->mmap_sem);
    if (file)
        fput(file);
out:
    return error;
}
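
/*
 * Note on the pgoff check above: pgoff is an unsigned long (32 bits
 * here), so a 64-bit file offset is only usable if its page number fits
 * in 32 bits -- with 4 KiB pages that allows offsets up to 2^44 - 1.
 * Comparing against the untruncated 64-bit shift rejects the rest.
 */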
extern asmlinkage int sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);

struct sel_arg_struct {
    unsigned long n;
    fd_set *inp, *outp, *exp;
    struct timeval *tvp;
};

asmlinkage int old_select(struct sel_arg_struct *arg)
{
    struct sel_arg_struct a;

    if (copy_from_user(&a, arg, sizeof(a)))
        return -EFAULT;
    /* sys_select() does the appropriate kernel locking */
    return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
}
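
/*
 * Illustrative only (not part of this file): the same memory-block
 * convention from userspace -- a hypothetical direct invocation of the
 * old select entry point; the syscall number is an assumption.
 */
#if 0
#include <sys/select.h>
#include <sys/syscall.h>
#include <unistd.h>

int old_select_example(int maxfd, fd_set *rd)
{
    struct sel_arg_struct a = {
        .n = maxfd, .inp = rd, .outp = NULL, .exp = NULL, .tvp = NULL,
    };
    /* 82 is __NR_select on Linux/m68k (assumption -- check unistd.h) */
    return syscall(82, &a);
}
#endif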
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
            int third, void *ptr, long fifth)
{
    int version, ret;

    version = call >> 16; /* hack for backward compatibility */
    call &= 0xffff;

    if (call <= SEMCTL)
        switch (call) {
        case SEMOP:
            return sys_semop (first, (struct sembuf *)ptr, second);
        case SEMGET:
            return sys_semget (first, second, third);
        case SEMCTL: {
            union semun fourth;
            if (!ptr)
                return -EINVAL;
            if (get_user(fourth.__pad, (void **) ptr))
                return -EFAULT;
            return sys_semctl (first, second, third, fourth);
            }
        default:
            return -EINVAL;
        }

    if (call <= MSGCTL)
        switch (call) {
        case MSGSND:
            return sys_msgsnd (first, (struct msgbuf *) ptr,
                       second, third);
        case MSGRCV:
            switch (version) {
            case 0: {
                /* Old-style callers pack msgp and msgtyp into
                   one user-space struct.  */
                struct ipc_kludge tmp;
                if (!ptr)
                    return -EINVAL;
                if (copy_from_user (&tmp,
                            (struct ipc_kludge *)ptr,
                            sizeof (tmp)))
                    return -EFAULT;
                return sys_msgrcv (first, tmp.msgp, second,
                           tmp.msgtyp, third);
                }
            default:
                return sys_msgrcv (first,
                           (struct msgbuf *) ptr,
                           second, fifth, third);
            }
        case MSGGET:
            return sys_msgget ((key_t) first, second);
        case MSGCTL:
            return sys_msgctl (first, second,
                       (struct msqid_ds *) ptr);
        default:
            return -EINVAL;
        }

    if (call <= SHMCTL)
        switch (call) {
        case SHMAT: {
            ulong raddr;
            ret = sys_shmat (first, (char *) ptr,
                     second, &raddr);
            if (ret)
                return ret;
            /* The attach address goes back through *third.  */
            return put_user (raddr, (ulong *) third);
            }
        case SHMDT:
            return sys_shmdt ((char *)ptr);
        case SHMGET:
            return sys_shmget (first, second, third);
        case SHMCTL:
            return sys_shmctl (first, second,
                       (struct shmid_ds *) ptr);
        default:
            return -EINVAL;
        }

    return -EINVAL;
}
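
/*
 * Illustrative only (not part of this file): how a caller encodes a
 * multiplexed request.  The low 16 bits of `call` select the operation,
 * the high 16 bits carry the interface version.  The operation value
 * below (SEMGET == 2) matches <linux/ipc.h>; treat it as an assumption.
 */
#if 0
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

int semget_via_ipc(key_t key, int nsems, int semflg)
{
    /* version 0 in the top half, SEMGET (2) in the bottom half */
    return syscall(SYS_ipc, (0 << 16) | 2, key, nsems, semflg,
               (void *)0, 0L);
}
#endif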
asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int on)
{
    /* m68k has no separate I/O space, so there is nothing to grant. */
    return -ENOSYS;
}
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)                     \
({                                  \
  unsigned long _mmusr, _paddr;                     \
                                    \
  __asm__ __volatile__ (".chip 68040\n\t"               \
            "ptestr (%1)\n\t"                   \
            "movec %%mmusr,%0\n\t"              \
            ".chip 68k"                     \
            : "=r" (_mmusr)                 \
            : "a" (vaddr));                 \
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;     \
  _paddr;                               \
})
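
/*
 * On the 68040, PTESTR walks the MMU tables for the given user address
 * and latches the result in %mmusr.  The resident bit (MMU_R_040) says
 * whether a translation exists; if it does, the upper bits of %mmusr
 * hold the physical page address, hence the PAGE_MASK above.
 */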
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
    unsigned long paddr, i;

    switch (scope) {
    case FLUSH_SCOPE_ALL:
        switch (cache) {
        case FLUSH_CACHE_DATA:
            /* This nop is needed for some broken versions of the 68040.  */
            __asm__ __volatile__ ("nop\n\t"
                          ".chip 68040\n\t"
                          "cpusha %dc\n\t"
                          ".chip 68k");
            break;
        case FLUSH_CACHE_INSN:
            __asm__ __volatile__ ("nop\n\t"
                          ".chip 68040\n\t"
                          "cpusha %ic\n\t"
                          ".chip 68k");
            break;
        default:
        case FLUSH_CACHE_BOTH:
            __asm__ __volatile__ ("nop\n\t"
                          ".chip 68040\n\t"
                          "cpusha %bc\n\t"
                          ".chip 68k");
            break;
        }
        break;

    case FLUSH_SCOPE_LINE:
        /* Find the physical address of the first mapped page in the
           address range.  */
        if ((paddr = virt_to_phys_040(addr))) {
            paddr += addr & ~(PAGE_MASK | 15);
            len = (len + (addr & 15) + 15) >> 4;
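            /*
             * Example: with addr & 15 == 5 and len == 20 the
             * range covers bytes 5..24, i.e. two 16-byte cache
             * lines, and indeed (20 + 5 + 15) >> 4 == 2.
             */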
        } else {
            unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

            /* Skip unmapped pages until a mapped one is found
               or the range is exhausted.  */
            if (len <= tmp)
                return 0;
            addr += tmp;
            len -= tmp;
            tmp = PAGE_SIZE;
            for (;;) {
                if ((paddr = virt_to_phys_040(addr)))
                    break;
                if (len <= tmp)
                    return 0;
                addr += tmp;
                len -= tmp;
            }
            len = (len + 15) >> 4;
        }
        i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
        while (len--) {
            switch (cache) {
            case FLUSH_CACHE_DATA:
                __asm__ __volatile__ ("nop\n\t"
                              ".chip 68040\n\t"
                              "cpushl %%dc,(%0)\n\t"
                              ".chip 68k"
                              : : "a" (paddr));
                break;
            case FLUSH_CACHE_INSN:
                __asm__ __volatile__ ("nop\n\t"
                              ".chip 68040\n\t"
                              "cpushl %%ic,(%0)\n\t"
                              ".chip 68k"
                              : : "a" (paddr));
                break;
            default:
            case FLUSH_CACHE_BOTH:
                __asm__ __volatile__ ("nop\n\t"
                              ".chip 68040\n\t"
                              "cpushl %%bc,(%0)\n\t"
                              ".chip 68k"
                              : : "a" (paddr));
                break;
            }
            if (!--i && len) {
                /*
                 * No need to page align here since it is done by
                 * virt_to_phys_040().
                 */
                addr += PAGE_SIZE;
                i = PAGE_SIZE / 16;
                /* Recompute physical address when crossing a page
                   boundary.  */
                for (;;) {
                    if ((paddr = virt_to_phys_040(addr)))
                        break;
                    if (len <= i)
                        return 0;
                    len -= i;
                    addr += PAGE_SIZE;
                }
            } else
                paddr += 16;
        }
        break;

    default:
    case FLUSH_SCOPE_PAGE:
        len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
        for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
            if (!(paddr = virt_to_phys_040(addr)))
                continue;
            switch (cache) {
            case FLUSH_CACHE_DATA:
                __asm__ __volatile__ ("nop\n\t"
                              ".chip 68040\n\t"
                              "cpushp %%dc,(%0)\n\t"
                              ".chip 68k"
                              : : "a" (paddr));
                break;
            case FLUSH_CACHE_INSN:
                __asm__ __volatile__ ("nop\n\t"
                              ".chip 68040\n\t"
                              "cpushp %%ic,(%0)\n\t"
                              ".chip 68k"
                              : : "a" (paddr));
                break;
            default:
            case FLUSH_CACHE_BOTH:
                __asm__ __volatile__ ("nop\n\t"
                              ".chip 68040\n\t"
                              "cpushp %%bc,(%0)\n\t"
                              ".chip 68k"
                              : : "a" (paddr));
                break;
            }
        }
        break;
    }
    return 0;
}
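
/*
 * Granularity summary for the 68040 sequences above: cpushl pushes one
 * 16-byte cache line, cpushp one page, cpusha the entire cache --
 * matching FLUSH_SCOPE_LINE, FLUSH_SCOPE_PAGE and FLUSH_SCOPE_ALL.
 */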
#define virt_to_phys_060(vaddr)                     \
({                                  \
  unsigned long paddr;                          \
  __asm__ __volatile__ (".chip 68060\n\t"               \
            "plpar (%0)\n\t"                    \
            ".chip 68k"                     \
            : "=a" (paddr)                  \
            : "0" (vaddr));                 \
  paddr; /* XXX */                          \
})
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
    unsigned long paddr, i;

    /*
     * The 68060 manual says:
     *  cpush %dc : flush DC, remains valid (with our %cacr setup)
     *  cpush %ic : invalidate IC
     *  cpush %bc : flush DC + invalidate IC
     */
    switch (scope) {
    case FLUSH_SCOPE_ALL:
        switch (cache) {
        case FLUSH_CACHE_DATA:
            __asm__ __volatile__ (".chip 68060\n\t"
                          "cpusha %dc\n\t"
                          ".chip 68k");
            break;
        case FLUSH_CACHE_INSN:
            __asm__ __volatile__ (".chip 68060\n\t"
                          "cpusha %ic\n\t"
                          ".chip 68k");
            break;
        default:
        case FLUSH_CACHE_BOTH:
            __asm__ __volatile__ (".chip 68060\n\t"
                          "cpusha %bc\n\t"
                          ".chip 68k");
            break;
        }
        break;

    case FLUSH_SCOPE_LINE:
        /* Find the physical address of the first mapped page in the
           address range.  */
        len += addr & 15;
        addr &= -16;
        if (!(paddr = virt_to_phys_060(addr))) {
            unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

            if (len <= tmp)
                return 0;
            addr += tmp;
            len -= tmp;
            tmp = PAGE_SIZE;
            for (;;) {
                if ((paddr = virt_to_phys_060(addr)))
                    break;
                if (len <= tmp)
                    return 0;
                addr += tmp;
                len -= tmp;
            }
        }
        len = (len + 15) >> 4;
        i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
        while (len--) {
            switch (cache) {
            case FLUSH_CACHE_DATA:
                __asm__ __volatile__ (".chip 68060\n\t"
                              "cpushl %%dc,(%0)\n\t"
                              ".chip 68k"
                              : : "a" (paddr));
                break;
            case FLUSH_CACHE_INSN:
                __asm__ __volatile__ (".chip 68060\n\t"
                              "cpushl %%ic,(%0)\n\t"
                              ".chip 68k"
                              : : "a" (paddr));
                break;
            default:
            case FLUSH_CACHE_BOTH:
                __asm__ __volatile__ (".chip 68060\n\t"
                              "cpushl %%bc,(%0)\n\t"
                              ".chip 68k"
                              : : "a" (paddr));
                break;
            }
            if (!--i && len) {
                /*
                 * We just want to jump to the first cache line
                 * in the next page.
                 */
                addr += PAGE_SIZE;
                addr &= PAGE_MASK;

                i = PAGE_SIZE / 16;
                /* Recompute physical address when crossing a page
                   boundary.  */
                for (;;) {
                    if ((paddr = virt_to_phys_060(addr)))
                        break;
                    if (len <= i)
                        return 0;
                    len -= i;
                    addr += PAGE_SIZE;
                }
            } else
                paddr += 16;
        }
        break;

    default:
    case FLUSH_SCOPE_PAGE:
        len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
        addr &= PAGE_MASK;  /* Workaround for bug in some
                       revisions of the 68060 */
        for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
            if (!(paddr = virt_to_phys_060(addr)))
                continue;
            switch (cache) {
            case FLUSH_CACHE_DATA:
                __asm__ __volatile__ (".chip 68060\n\t"
                              "cpushp %%dc,(%0)\n\t"
                              ".chip 68k"
                              : : "a" (paddr));
                break;
            case FLUSH_CACHE_INSN:
                __asm__ __volatile__ (".chip 68060\n\t"
                              "cpushp %%ic,(%0)\n\t"
                              ".chip 68k"
                              : : "a" (paddr));
                break;
            default:
            case FLUSH_CACHE_BOTH:
                __asm__ __volatile__ (".chip 68060\n\t"
                              "cpushp %%bc,(%0)\n\t"
                              ".chip 68k"
                              : : "a" (paddr));
                break;
            }
        }
        break;
    }
    return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
    struct vm_area_struct *vma;
    int ret = -EINVAL;

    lock_kernel();
    if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
        cache & ~FLUSH_CACHE_BOTH)
        goto out;

    if (scope == FLUSH_SCOPE_ALL) {
        /* Only the superuser may explicitly flush the whole cache. */
        ret = -EPERM;
        if (!capable(CAP_SYS_ADMIN))
            goto out;
    } else {
        /*
         * Verify that the specified address region actually belongs
         * to this process.
         */
        vma = find_vma (current->mm, addr);
        ret = -EINVAL;
        /* Check for overflow.  */
        if (addr + len < addr)
            goto out;
        if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
            goto out;
    }

    if (CPU_IS_020_OR_030) {
        if (scope == FLUSH_SCOPE_LINE && len < 256) {
            unsigned long cacr;
            __asm__ ("movec %%cacr, %0" : "=r" (cacr));
            if (cache & FLUSH_CACHE_INSN)
                cacr |= 4;
            if (cache & FLUSH_CACHE_DATA)
                cacr |= 0x400;
            len >>= 2;
            while (len--) {
                __asm__ __volatile__ ("movec %1, %%caar\n\t"
                              "movec %0, %%cacr"
                              : /* no outputs */
                              : "r" (cacr), "r" (addr));
                addr += 4;
            }
        } else {
            /* Flush the whole cache, even if page granularity requested. */
            unsigned long cacr;
            __asm__ ("movec %%cacr, %0" : "=r" (cacr));
            if (cache & FLUSH_CACHE_INSN)
                cacr |= 8;
            if (cache & FLUSH_CACHE_DATA)
                cacr |= 0x800;
            __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
        }
        ret = 0;
        goto out;
    } else {
        /*
         * 040 or 060: don't blindly trust 'scope', someone could
         * try to flush a few megs of memory.
         */
        if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
            scope = FLUSH_SCOPE_PAGE;
        if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
            scope = FLUSH_SCOPE_ALL;
        if (CPU_IS_040) {
            ret = cache_flush_040 (addr, scope, cache, len);
        } else if (CPU_IS_060) {
            ret = cache_flush_060 (addr, scope, cache, len);
        }
    }
out:
    unlock_kernel();
    return ret;
}
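
/*
 * Illustrative only (not part of this file): a hypothetical userspace
 * caller -- e.g. a JIT flushing freshly written code before jumping to
 * it.  The FLUSH_* constants come from <asm/cachectl.h>; the syscall
 * number is an assumption, check <asm/unistd.h> on a real system.
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/cachectl.h>

int flush_generated_code(void *code, unsigned long len)
{
    /* 123 is __NR_cacheflush on Linux/m68k (assumption) */
    return syscall(123, (unsigned long)code, FLUSH_SCOPE_LINE,
               FLUSH_CACHE_BOTH, len);
}
#endif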
asmlinkage int sys_getpagesize(void)
{
    return PAGE_SIZE;
}
asmlinkage int sys_pause(void)
{
    current->state = TASK_INTERRUPTIBLE;
    schedule();
    return -ERESTARTNOHAND;
}
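
/*
 * Note: -ERESTARTNOHAND never reaches userspace.  Signal delivery turns
 * it into -EINTR when a handler runs and restarts the syscall otherwise,
 * which is exactly pause()'s documented behaviour of returning only
 * after a caught signal.
 */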