/*
 * BK Id: SCCS/s.process.c 1.48 01/29/03 07:46:20 trini
 *
 * linux/arch/ppc/kernel/process.c
 *
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/processor.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/iSeries/Paca.h>
#endif /* CONFIG_PPC_ISERIES */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs);
extern unsigned long _get_SP(void);

struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;

static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS;
struct mm_struct init_mm = INIT_MM(init_mm);

/* this is 16-byte aligned because it has a stack in it */
union task_union __attribute((aligned(16))) init_task_union = {
	INIT_TASK(init_task_union.task)
};

/* only used to get secondary processor up */
struct task_struct *current_set[NR_CPUS] = {&init_task, };
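/*
 * Layout assumed throughout this file: each task's task_union holds the
 * task_struct at its base, and the kernel stack grows down from the top
 * of the union towards it.  That is why init_task_union carries an
 * alignment attribute and why the stack helpers below work out both
 * stack bounds from sizeof(union task_union) and
 * sizeof(struct task_struct).
 */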
#undef SHOW_TASK_SWITCHES

#if defined(CHECK_STACK)
unsigned long
kernel_stack_top(struct task_struct *tsk)
{
	return ((unsigned long)tsk) + sizeof(union task_union);
}

unsigned long
task_top(struct task_struct *tsk)
{
	return ((unsigned long)tsk) + sizeof(struct task_struct);
}
/* check to make sure the kernel stack is healthy */
int check_stack(struct task_struct *tsk)
{
	unsigned long stack_top = kernel_stack_top(tsk);
	unsigned long tsk_top = task_top(tsk);
	unsigned long *i;
	int ret = 0;

	/* check thread magic */
	if ( tsk->thread.magic != THREAD_MAGIC )
	{
		ret |= 1;
		printk("thread.magic bad: %08x\n", tsk->thread.magic);
	}

	if ( !tsk )
		printk("check_stack(): tsk bad tsk %p\n",tsk);

	/* check if stored ksp is bad */
	if ( (tsk->thread.ksp > stack_top) || (tsk->thread.ksp < tsk_top) )
	{
		ret |= 2;
		printk("stack out of bounds: %s/%d\n"
		       " tsk_top %08lx ksp %08lx stack_top %08lx\n",
		       tsk->comm, tsk->pid,
		       tsk_top, tsk->thread.ksp, stack_top);
	}

	/* check if stack ptr RIGHT NOW is bad */
	if ( (tsk == current) && ((_get_SP() > stack_top) || (_get_SP() < tsk_top)) )
	{
		ret |= 4;
		printk("current stack ptr out of bounds: %s/%d\n"
		       " tsk_top %08lx sp %08lx stack_top %08lx\n",
		       current->comm, current->pid,
		       tsk_top, _get_SP(), stack_top);
	}

	/* check amount of free stack */
	for ( i = (unsigned long *)task_top(tsk);
	      i < (unsigned long *)kernel_stack_top(tsk); i++ )
	{
		if ( !i )
			printk("check_stack(): i = %p\n", i);
		if ( *i != 0 )
		{
			/* only notify if it's less than 900 bytes */
			if ( ((unsigned long)i - task_top(tsk)) < 900 )
				printk("%d bytes free on stack\n",
				       (int)((unsigned long)i - task_top(tsk)));
			break;
		}
	}

	if (ret)
		panic("bad kernel stack");
	return ret;
}
#endif /* defined(CHECK_STACK) */
#ifdef CONFIG_ALTIVEC
int
dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
{
	if (regs->msr & MSR_VEC)
		giveup_altivec(current);
	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
	return 1;
}

void
enable_kernel_altivec(void)
{
#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_ALTIVEC */
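/*
 * The lazy AltiVec scheme assumed above: the hardware vector registers
 * belong to at most one thread at a time (tracked on UP through
 * last_task_used_altivec), and giveup_altivec() is expected to flush
 * that thread's live registers back into its thread_struct and leave
 * the vector unit enabled for whoever asked.  dump_altivec() therefore
 * flushes first so the memcpy() sees up-to-date values.
 */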
void
enable_kernel_fp(void)
{
#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}

int
dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
	if (regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpregs, &current->thread.fpr[0], sizeof(*fpregs));
	return 1;
}
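/*
 * dump_fpu() is the hook the ELF core-dump code uses to read a task's
 * floating point registers; as with dump_altivec() above, any state
 * still live in the FPU is written back to current->thread.fpr before
 * it is copied out.
 */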
void
_switch_to(struct task_struct *prev, struct task_struct *new,
	   struct task_struct **last)
{
	struct thread_struct *new_thread, *old_thread;

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 */
	if ( prev->thread.regs && (prev->thread.regs->msr & MSR_FP) )
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#endif /* CONFIG_SMP */

	current_set[smp_processor_id()] = new;

	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
	new_thread = &new->thread;
	old_thread = &current->thread;
	*last = _switch(old_thread, new_thread);
}
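/*
 * The "avoid the trap" step above works because on UP the vector
 * registers still hold last_task_used_altivec's values: if that is the
 * task being switched in, setting MSR_VEC in its saved MSR lets it
 * return to user space with the vector unit already enabled instead of
 * taking an AltiVec-unavailable exception just to be handed back the
 * registers it already owns.
 */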
void show_regs(struct pt_regs * regs)
{
	int i;

	printk("NIP: %08lX XER: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx    %s\n",
	       regs->nip, regs->xer, regs->link, regs->gpr[1], regs, regs->trap, print_tainted());
	printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
	       regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
	       regs->msr&MSR_FP ? 1 : 0, regs->msr&MSR_ME ? 1 : 0,
	       regs->msr&MSR_IR ? 1 : 0,
	       regs->msr&MSR_DR ? 1 : 0);
#if defined(CONFIG_4xx)
	/*
	 * TRAP 0x800 is the hijacked FPU unavailable exception vector
	 * on 40x used to implement the heavyweight data access
	 * functionality.  It is an emulated value (like all trap
	 * vectors) since the 40x core has no FPU.
	 */
	if (regs->trap == 0x300 || regs->trap == 0x600 || regs->trap == 0x800)
		printk("DEAR: %08lX, ESR: %08lX\n", regs->dar, regs->dsisr);
#else
	if (regs->trap == 0x300 || regs->trap == 0x600)
		printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' ",
	       current, current->pid, current->comm);
	printk("Last syscall: %ld ", current->thread.last_syscall);
	printk("\nlast math %p last altivec %p", last_task_used_math,
	       last_task_used_altivec);

#if defined(CONFIG_4xx) && defined(DCRN_PLB0_BEAR)
	printk("\nPLB0: bear= 0x%8.8x acr= 0x%8.8x besr= 0x%8.8x\n",
	       mfdcr(DCRN_PLB0_BEAR), mfdcr(DCRN_PLB0_ACR),
	       mfdcr(DCRN_PLB0_BESR));
#endif
#if defined(CONFIG_4xx) && defined(DCRN_POB0_BEAR)
	printk("PLB0 to OPB: bear= 0x%8.8x besr0= 0x%8.8x besr1= 0x%8.8x\n",
	       mfdcr(DCRN_POB0_BEAR), mfdcr(DCRN_POB0_BESR0),
	       mfdcr(DCRN_POB0_BESR1));
#endif

#ifdef CONFIG_SMP
	printk(" CPU: %d", current->processor);
#endif /* CONFIG_SMP */
	printk("\n");

	for (i = 0; i < 32; i++)
	{
		long r;

		if ((i % 8) == 0)
			printk("GPR%02d: ", i);
		if ( __get_user(r, &(regs->gpr[i])) )
			break;
		printk("%08lX ", r);
		if ((i % 8) == 7)
			printk("\n");
	}

	print_backtrace((unsigned long *)regs->gpr[1]);
}
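/*
 * Note that show_regs() reads the GPRs back through __get_user() rather
 * than plain dereferences, so a corrupt or partially-unmapped pt_regs
 * pointer makes the register dump stop early instead of faulting the
 * dump itself.
 */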
void exit_thread(void)
{
	if (last_task_used_math == current)
		last_task_used_math = NULL;
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
}

void flush_thread(void)
{
	if (last_task_used_math == current)
		last_task_used_math = NULL;
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
}

void
release_thread(struct task_struct *t)
{
}
int
copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
	    unsigned long unused,
	    struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)p + sizeof(union task_union);
	unsigned long childframe;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		childregs->gpr[2] = (unsigned long) p;
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
	}
	childregs->gpr[3] = 0;	/* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;
	childframe = sp;
	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
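	/*
	 * Sketch of the child's kernel stack as assembled here, with
	 * addresses decreasing downwards from the top of the task_union:
	 *
	 *   p + sizeof(union task_union)
	 *   [ struct pt_regs        ]  childregs - register image the child
	 *                              restores on its way back to user space
	 *   [ STACK_FRAME_OVERHEAD  ]  childframe - minimal ABI stack frame
	 *   [ struct pt_regs        ]  kregs - fake switch frame whose nip
	 *                              points at ret_from_fork
	 *   [ STACK_FRAME_OVERHEAD  ]  frame popped by _switch; the child's
	 *                              saved kernel stack pointer ends up here
	 */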
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	kregs->nip = (unsigned long)ret_from_fork;
#ifdef CONFIG_PPC_ISERIES
	kregs->softEnable = ((struct Paca *)mfspr(SPRG1))->xProcEnabled;
#endif /* CONFIG_PPC_ISERIES */

	/*
	 * copy fpu info - assume lazy fpu switch now always
	 */
	if (regs->msr & MSR_FP) {
		giveup_fpu(current);
		childregs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
	}
	memcpy(&p->thread.fpr, &current->thread.fpr, sizeof(p->thread.fpr));
	p->thread.fpscr = current->thread.fpscr;

#ifdef CONFIG_ALTIVEC
	/*
	 * copy altiVec info - assume lazy altiVec switch
	 */
	if (regs->msr & MSR_VEC)
		giveup_altivec(current);
	memcpy(&p->thread.vr, &current->thread.vr, sizeof(p->thread.vr));
	p->thread.vscr = current->thread.vscr;
	childregs->msr &= ~MSR_VEC;
#endif /* CONFIG_ALTIVEC */

	p->thread.last_syscall = -1;

	return 0;
}
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
{
	set_fs(USER_DS);
	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->nip = nip;
	regs->gpr[1] = sp;
	regs->msr = MSR_USER;
	if (last_task_used_math == current)
		last_task_used_math = NULL;
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vrsave = 0;
#endif /* CONFIG_ALTIVEC */
}
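/*
 * With the register image cleared, MSR set back to MSR_USER and the
 * lazy FP/AltiVec ownership dropped above, the freshly exec'd program
 * starts from a clean slate instead of inheriting floating point or
 * vector state from the image that called exec.
 */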
/*
 * Support for the PR_GET/SET_FPEXC prctl() calls.
 */
static inline unsigned int __unpack_fe01(unsigned int msr_bits)
{
	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}

static inline unsigned int __pack_fe01(unsigned int fpmode)
{
	return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}
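/*
 * Worked example, assuming the usual 32-bit MSR bit assignments
 * (MSR_FE0 = 1 << 11, MSR_FE1 = 1 << 8): the prctl mode is a two-bit
 * value whose bit 1 maps to FE0 and bit 0 to FE1, so
 * __pack_fe01(PR_FP_EXC_PRECISE) == MSR_FE0 | MSR_FE1 == 0x900 and
 * __pack_fe01(PR_FP_EXC_DISABLED) == 0, while __unpack_fe01() simply
 * reverses the two shifts.
 */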
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int *) adr);
}
int sys_clone(int p1, int p2, int p3, int p4, int p5, int p6,
	      struct pt_regs *regs)
{
	return do_fork(p1, regs->gpr[1], regs, 0);
}

int sys_fork(int p1, int p2, int p3, int p4, int p5, int p6,
	     struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0);
}

int sys_vfork(int p1, int p2, int p3, int p4, int p5, int p6,
	      struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], regs, 0);
}
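/*
 * In all three wrappers above, regs->gpr[1] is the user stack pointer
 * at the time of the system call (the same value show_regs() prints as
 * SP), handed to do_fork() as the child's initial stack; sys_clone()
 * passes its first argument straight through as the clone flags.
 */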
int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5,
	       struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname((char *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	if (regs->msr & MSR_FP)
		giveup_fpu(current);
#ifdef CONFIG_ALTIVEC
	if (regs->msr & MSR_VEC)
		giveup_altivec(current);
#endif /* CONFIG_ALTIVEC */
	error = do_execve(filename, (char **) a1, (char **) a2, regs);
	if (error == 0)
		current->ptrace &= ~PT_DTRACE;
	putname(filename);
out:
	return error;
}
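/*
 * The giveup_fpu()/giveup_altivec() calls before do_execve() push any
 * register state still held lazily in hardware back into the
 * thread_struct, so nothing stale is left behind when the exec'd image
 * later has its thread state reset by start_thread().
 */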
void
print_backtrace(unsigned long *sp)
{
	int cnt = 0;
	unsigned long i;

	printk("Call backtrace: ");
	while (sp) {
		if (__get_user(i, &sp[1]))
			break;
		printk("%08lX ", i);
		if (__get_user(sp, (unsigned long **)sp))
			break;
		if (++cnt > 32)
			break;
	}
	printk("\n");
}
void show_trace_task(struct task_struct *tsk)
{
	unsigned long stack_top = (unsigned long) tsk + THREAD_SIZE;
	unsigned long sp, prev_sp;
	int count = 0;

	if (tsk == NULL)
		return;

	sp = (unsigned long) &tsk->thread.ksp;
	do {
		prev_sp = sp;
		sp = *(unsigned long *)sp;
		if (sp <= prev_sp || sp >= stack_top || (sp & 3) != 0)
			break;
		printk("[%08lx] ", *(unsigned long *)(sp + 4));
	} while (++count < 16);
	printk("\n");
}
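/*
 * Both stack walkers above rely on the PowerPC SVR4 ABI frame layout:
 * word 0 of each stack frame holds the back chain (the address of the
 * caller's frame) and the saved LR lives at offset 4, which is why the
 * code steps with *sp and reads return addresses from *(sp + 4).
 */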
/*
 * Low level print for debugging - Cort
 */
void ll_puts(const char *s);

int __init ll_printk(const char *fmt, ...)
{
	va_list args;
	char buf[256];
	int i;

	va_start(args, fmt);
	i = vsprintf(buf, fmt, args);
	ll_puts(buf);
	va_end(args);
	return i;
}
int lines = 24, cols = 80;
int orig_x = 0, orig_y = 0;
void puthex(unsigned long val)
{
	unsigned char buf[10];
	int i;

	for (i = 7; i >= 0; i--)
	{
		buf[i] = "0123456789ABCDEF"[val & 0x0F];
		val >>= 4;
	}
	buf[8] = '\0';
	ll_puts((const char *)buf);
}
void __init ll_puts(const char *s)
{
	int x, y;
	char *vidmem = (char *)/*(_ISA_MEM_BASE + 0xB8000) */0xD00B8000;
	char c;
	extern int mem_init_done;

	if ( mem_init_done ) {	/* assume this means we can printk */
		printk(KERN_INFO "%s", s);
		return;
	}

	/*
	 * can't ll_puts on chrp without openfirmware yet.
	 * vidmem just needs to be setup for it.
	 */
	if ( _machine != _MACH_prep )
		return;

	x = orig_x;
	y = orig_y;

	while ( ( c = *s++ ) != '\0' ) {
		if ( c == '\n' ) {
			x = 0;
			if ( ++y >= lines )
				y = 0;	/* wrap back to the top line */
		} else {
			vidmem [ ( x + cols * y ) * 2 ] = c;
			if ( ++x >= cols ) {
				x = 0;
				if ( ++y >= lines )
					y = 0;	/* wrap back to the top line */
			}
		}
	}

	orig_x = x;
	orig_y = y;
}
/*
 * These bracket the sleeping functions..
 */
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched    ((unsigned long) scheduling_functions_start_here)
#define last_sched     ((unsigned long) scheduling_functions_end_here)
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	unsigned long stack_page = (unsigned long) p;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	sp = p->thread.ksp;
	do {
		sp = *(unsigned long *)sp;
		if (sp < stack_page || sp >= stack_page + 8188)
			return 0;
		if (count > 0) {
			ip = *(unsigned long *)(sp + 4);
			if (ip < first_sched || ip >= last_sched)
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
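/*
 * get_wchan() performs the same back-chain walk on a sleeping task's
 * saved kernel stack and returns the first return address that falls
 * outside first_sched..last_sched, i.e. the first caller that is not
 * part of the scheduler itself.  That address is what utilities such
 * as ps report as the task's wait channel (WCHAN).
 */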