import of upstream 2.4.34.4 from kernel.org
[linux-2.4.git] / arch / mips / kernel / process.c
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1994 - 2000 by Ralf Baechle and others.
7  * Copyright (C) 1999 Silicon Graphics, Inc.
8  */
9 #include <linux/errno.h>
10 #include <linux/sched.h>
11 #include <linux/kernel.h>
12 #include <linux/mm.h>
13 #include <linux/stddef.h>
14 #include <linux/unistd.h>
15 #include <linux/personality.h>
16 #include <linux/slab.h>
17 #include <linux/mman.h>
18 #include <linux/sys.h>
19 #include <linux/user.h>
20 #include <linux/a.out.h>
21 #include <linux/init.h>
22 #include <linux/completion.h>
23
24 #include <asm/bootinfo.h>
25 #include <asm/cpu.h>
26 #include <asm/fpu.h>
27 #include <asm/pgtable.h>
28 #include <asm/system.h>
29 #include <asm/mipsregs.h>
30 #include <asm/processor.h>
31 #include <asm/ptrace.h>
32 #include <asm/uaccess.h>
33 #include <asm/io.h>
34 #include <asm/elf.h>
35 #include <asm/isadep.h>
36 #include <asm/inst.h>
37
/*
 * The per-CPU idle loop.  Never returns: runs at the lowest scheduling
 * weight and hands the CPU over as soon as anything else is runnable.
 */
ATTRIB_NORET void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	current->nice = 20;		/* lowest nice value */
	current->counter = -100;	/* never wins goodness() on its own */
	init_idle();

	while (1) {
		/*
		 * Wait until a reschedule is needed.  If the platform
		 * supplies a wait hook (cpu_wait), call it each pass so the
		 * CPU can idle in low power instead of busy-spinning.
		 */
		while (!current->need_resched)
			if (cpu_wait)
				(*cpu_wait)();
		schedule();
		/* idle time is a convenient moment to trim cached page tables */
		check_pgt_cache();
	}
}
53
54 asmlinkage void ret_from_fork(void);
55
/*
 * Prepare the register state for a freshly exec'd user thread: drop
 * coprocessor privileges, switch to user mode, discard FPU state and
 * aim the thread at its new entry point and stack.
 */
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	/* New thread loses kernel mode and coprocessor 0/1 access. */
	regs->cp0_status &= ~(ST0_CU0|ST0_KSU|ST0_CU1);
	regs->cp0_status |= KU_USER;
	current->used_math = 0;		/* no FPU state worth keeping */
	lose_fpu();			/* release FPU ownership now */
	regs->cp0_epc = pc;		/* program entry point */
	regs->regs[29] = sp;		/* $sp: initial user stack pointer */
	current->thread.current_ds = USER_DS;
}
66
/* No architecture-specific state to release on thread exit for MIPS. */
void exit_thread(void)
{
}
70
/* No architecture-specific state to flush across exec for MIPS. */
void flush_thread(void)
{
}
74
/*
 * Set up architecture-specific thread state for a child created by
 * fork/clone: builds a pt_regs copy at the top of the child's kernel
 * stack and arranges for the child to resume in ret_from_fork.
 *
 * Always returns 0 (cannot fail on MIPS).
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
                 unsigned long unused,
                 struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;
	long childksp;

	/* Top of the child's kernel stack, minus a 32-byte save area. */
	childksp = (unsigned long)p + KERNEL_STACK_SIZE - 32;

	/* Flush live FPU registers into the child's thread struct first. */
	if (is_fpu_owner()) {
		save_fp(p);
	}

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;	/* pt_regs at stack top */
	*childregs = *regs;				/* child inherits parent's frame */
	childregs->regs[7] = 0;	/* Clear error flag */
	if(current->personality == PER_LINUX) {
		childregs->regs[2] = 0;	/* Child gets zero as return value */
		regs->regs[2] = p->pid;	/* parent gets the child's pid */
	} else {
		/* Under IRIX things are a little different. */
		childregs->regs[2] = 0;		/* v0: return value */
		childregs->regs[3] = 1;		/* v1: "I am the child" */
		regs->regs[2] = p->pid;
		regs->regs[3] = 0;		/* v1: "I am the parent" */
	}
	if (childregs->cp0_status & ST0_CU0) {
		/* Kernel thread: runs on the kernel stack with kernel $gp. */
		childregs->regs[28] = (unsigned long) p;	/* $gp */
		childregs->regs[29] = childksp;			/* $sp */
		p->thread.current_ds = KERNEL_DS;
	} else {
		/* User thread: caller supplies the user stack pointer. */
		childregs->regs[29] = usp;
		p->thread.current_ds = USER_DS;
	}
	/* Where the context-switch code resumes the child. */
	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	p->thread.cp0_status = read_c0_status() &
	                    ~(ST0_CU2|ST0_CU1|KU_MASK);
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	return 0;
}
123
124 /* Fill in the fpu structure for a core dump.. */
125 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
126 {
127         memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));
128         return 1;
129 }
130
/*
 * Create a kernel thread.
 *
 * Issues the clone(2) syscall directly from inline assembly.  The
 * pre-syscall stack pointer is stashed in $6; after the syscall the
 * parent still has the same $sp, so the beq falls through to the
 * return path, while the child (running on a different stack — see
 * copy_thread()) takes the fall-through path, calls fn(arg) and then
 * invokes exit(2) with fn's return value.
 *
 * Returns the clone() result (child pid or negative errno) in the
 * parent; the child never returns from here.
 */
int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	long retval;

	__asm__ __volatile__(
		"	.set	noreorder	\n"
		"	move	$6, $sp		\n"
		"	move	$4, %5		\n"
		"	li	$2, %1		\n"
		"	syscall			\n"
		"	beq	$6, $sp, 1f	\n"
		"	 subu	 $sp, 32	\n"
		"	jalr	%4		\n"
		"	 move	 $4, %3		\n"
		"	move	$4, $2		\n"
		"	li	$2, %2		\n"
		"	syscall			\n"
		"1:	addiu	$sp, 32		\n"
		"	move	%0, $2		\n"
		"	.set	reorder"
		: "=r" (retval)
		: "i" (__NR_clone), "i" (__NR_exit), "r" (arg), "r" (fn),
		  "r" (flags | CLONE_VM)
		 /*
		  * The called subroutine might have destroyed any of the
		  * at, result, argument or temporary registers ...
		  */
		: "$2", "$3", "$4", "$5", "$6", "$7", "$8",
		  "$9","$10","$11","$12","$13","$14","$15","$24","$25", "$31");

	return retval;
}
166
/*
 * These bracket the sleeping functions..
 */
extern void scheduling_functions_start_here(void);
extern void scheduling_functions_end_here(void);
#define first_sched	((unsigned long) scheduling_functions_start_here)
#define last_sched	((unsigned long) scheduling_functions_end_here)

/*
 * Decoded prologue info (stack offsets of the saved $ra/$s8) for each
 * sleep primitive that get_wchan() must look through; filled in by
 * frame_info_init().
 */
struct mips_frame_info schedule_frame;
static struct mips_frame_info schedule_timeout_frame;
static struct mips_frame_info sleep_on_frame;
static struct mips_frame_info sleep_on_timeout_frame;
static struct mips_frame_info wait_for_completion_frame;
/* Non-zero once all of the frames above were decoded successfully. */
static int mips_frame_info_initialized;
181 static int __init get_frame_info(struct mips_frame_info *info, void *func)
182 {
183         int i;
184         union mips_instruction *ip = (union mips_instruction *)func;
185         info->pc_offset = -1;
186         info->frame_offset = -1;
187         for (i = 0; i < 128; i++, ip++) {
188                 /* if jal, jalr, jr, stop. */
189                 if (ip->j_format.opcode == jal_op ||
190                     (ip->r_format.opcode == spec_op &&
191                      (ip->r_format.func == jalr_op ||
192                       ip->r_format.func == jr_op)))
193                         break;
194                 if (ip->i_format.opcode == sw_op &&
195                     ip->i_format.rs == 29) {
196                         /* sw $ra, offset($sp) */
197                         if (ip->i_format.rt == 31) {
198                                 if (info->pc_offset != -1)
199                                         break;
200                                 info->pc_offset =
201                                         ip->i_format.simmediate / sizeof(long);
202                         }
203                         /* sw $s8, offset($sp) */
204                         if (ip->i_format.rt == 30) {
205                                 if (info->frame_offset != -1)
206                                         break;
207                                 info->frame_offset =
208                                         ip->i_format.simmediate / sizeof(long);
209                         }
210                 }
211         }
212         if (info->pc_offset == -1 || info->frame_offset == -1) {
213                 printk("Can't analyze prologue code at %p\n", func);
214                 info->pc_offset = -1;
215                 info->frame_offset = -1;
216                 return -1;
217         }
218
219         return 0;
220 }
221 void __init frame_info_init(void)
222 {
223         mips_frame_info_initialized =
224                 !get_frame_info(&schedule_frame, schedule) &&
225                 !get_frame_info(&schedule_timeout_frame, schedule_timeout) &&
226                 !get_frame_info(&sleep_on_frame, sleep_on) &&
227                 !get_frame_info(&sleep_on_timeout_frame, sleep_on_timeout) &&
228                 !get_frame_info(&wait_for_completion_frame, wait_for_completion);
229 }
230
/* get_wchan - a maintenance nightmare^W^Wpain in the ass ...  */
/*
 * Return the kernel-text address a sleeping task @p is blocked at
 * (for /proc/<pid>/wchan), by walking the saved frames of the known
 * sleep primitives with the offsets decoded by frame_info_init().
 *
 * Returns 0 for a running task, the current task, or when frame
 * information is unavailable.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long frame, pc;

	/* Only meaningful for some other task that is actually asleep. */
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	if (!mips_frame_info_initialized)
		return 0;
	pc = thread_saved_pc(&p->thread);
	/* Sleeping outside the scheduler region: pc itself is the wchan. */
	if (pc < first_sched || pc >= last_sched) {
		return pc;
	}

	/*
	 * pc is inside a sleep primitive; classify which one by comparing
	 * against the primitives' start addresses.
	 * NOTE(review): this assumes a specific link order of these
	 * functions within the scheduler text section — verify before
	 * reusing this pattern.
	 */
	if (pc >= (unsigned long) sleep_on_timeout)
		goto schedule_timeout_caller;
	if (pc >= (unsigned long) sleep_on)
		goto schedule_caller;
	if (pc >= (unsigned long) interruptible_sleep_on_timeout)
		goto schedule_timeout_caller;
	if (pc >= (unsigned long)interruptible_sleep_on)
		goto schedule_caller;
	if (pc >= (unsigned long)wait_for_completion)
		goto schedule_caller;
	goto schedule_timeout_caller;

schedule_caller:
	/* One frame up from schedule(): the direct sleeper's frame. */
	frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];
	if (pc >= (unsigned long) sleep_on)
		pc = ((unsigned long *)frame)[sleep_on_frame.pc_offset];
	else
		pc = ((unsigned long *)frame)[wait_for_completion_frame.pc_offset];
	return pc;

schedule_timeout_caller:
	/*
	 * The schedule_timeout frame
	 */
	frame = ((unsigned long *)p->thread.reg30)[schedule_frame.frame_offset];

	/*
	 * frame now points to sleep_on_timeout's frame
	 */
	pc    = ((unsigned long *)frame)[schedule_timeout_frame.pc_offset];

	if (pc >= first_sched && pc < last_sched) {
		/* schedule_timeout called by [interruptible_]sleep_on_timeout */
		frame = ((unsigned long *)frame)[schedule_timeout_frame.frame_offset];
		pc    = ((unsigned long *)frame)[sleep_on_timeout_frame.pc_offset];
	}

	return pc;
}