/*
 *  arch/s390/kernel/process.c
 *
 *  Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *             Hartmut Penner (hp@de.ibm.com),
 *             Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995, Linus Torvalds
 *
 *  This file handles the architecture-dependent parts of process handling..
 */
18 #define __KERNEL_SYSCALLS__
21 #include <linux/config.h>
22 #include <linux/errno.h>
23 #include <linux/sched.h>
24 #include <linux/kernel.h>
26 #include <linux/smp.h>
27 #include <linux/smp_lock.h>
28 #include <linux/stddef.h>
29 #include <linux/unistd.h>
30 #include <linux/ptrace.h>
31 #include <linux/slab.h>
32 #include <linux/vmalloc.h>
33 #include <linux/user.h>
34 #include <linux/a.out.h>
35 #include <linux/interrupt.h>
36 #include <linux/delay.h>
37 #include <linux/reboot.h>
38 #include <linux/init.h>
40 #include <asm/uaccess.h>
41 #include <asm/pgtable.h>
42 #include <asm/system.h>
44 #include <asm/processor.h>
47 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
50 * The idle loop on a S390...
53 int cpu_idle(void *unused)
58 /* endless idle loop with no priority at all */
61 current->counter = -100;
64 if (current->need_resched) {
72 * Wait for external, I/O or machine check interrupt and
73 * switch of machine check bit after the wait has ended.
75 wait_psw.mask = _WAIT_PSW_MASK;
85 : "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
89 extern void show_registers(struct pt_regs *regs);
90 extern void show_trace(unsigned long *sp);
92 void show_regs(struct pt_regs *regs)
94 struct task_struct *tsk = current;
96 printk("CPU: %d %s\n", tsk->processor, print_tainted());
97 printk("Process %s (pid: %d, task: %016lx, ksp: %016lx)\n",
98 current->comm, current->pid, (unsigned long) tsk,
101 show_registers(regs);
102 /* Show stack backtrace if pt_regs is from kernel mode */
103 if (!(regs->psw.mask & PSW_PROBLEM_STATE))
104 show_trace((unsigned long *) regs->gprs[15]);
107 int arch_kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
109 int clone_arg = flags | CLONE_VM;
112 __asm__ __volatile__(
115 " lg 4,%6\n" /* load kernel stack ptr of parent */
116 " svc %b2\n" /* Linux system call*/
117 " clg 4,%6\n" /* compare ksp's: child or parent ? */
118 " je 0f\n" /* parent - jump*/
119 " lg 15,%6\n" /* fix kernel stack pointer*/
121 " xc 0(160,15),0(15)\n" /* clear save area */
122 " lgr 2,%4\n" /* load argument*/
123 " basr 14,%5\n" /* call fn*/
124 " svc %b3\n" /* Linux system call*/
127 : "d" (clone_arg), "i" (__NR_clone), "i" (__NR_exit),
128 "d" (arg), "a" (fn), "i" (__LC_KERNEL_STACK) ,
129 "i" (-STACK_FRAME_OVERHEAD)
/*
 * Free current thread data structures etc..
 * Nothing architecture-specific to do on s390.
 */
void exit_thread(void)
{
}
141 void flush_thread(void)
144 current->used_math = 0;
145 current->flags &= ~PF_USEDFPU;
/* Nothing architecture-specific to release for a dead task on s390. */
void release_thread(struct task_struct *dead_task)
{
}
152 int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
153 unsigned long unused,
154 struct task_struct * p, struct pt_regs * regs)
158 unsigned long back_chain;
162 unsigned long scratch[2];
163 unsigned long gprs[10]; /* gprs 6 -15 */
164 unsigned long fprs[2]; /* fpr 4 and 6 */
165 unsigned long empty[2];
166 struct pt_regs childregs;
169 frame = (struct stack_frame *) (4*PAGE_SIZE + (unsigned long) p) -1;
170 p->thread.ksp = (unsigned long) frame;
171 frame->childregs = *regs;
172 frame->childregs.gprs[15] = new_stackp;
173 frame->back_chain = frame->eos = 0;
175 /* new return point is ret_from_sys_call */
176 frame->gprs[8] = (unsigned long) &ret_from_fork;
178 /* fake return stack for resume(), don't go back to schedule */
179 frame->gprs[9] = (unsigned long) frame;
180 /* save fprs, if used in last task */
181 save_fp_regs(&p->thread.fp_regs);
182 p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE;
183 /* start new process with ar4 pointing to the correct address space */
184 p->thread.ar4 = get_fs().ar4;
185 /* Don't copy debug registers */
186 memset(&p->thread.per_info,0,sizeof(p->thread.per_info));
191 * Allocation and freeing of basic task resources.
192 * The task struct and the stack go together.
194 * NOTE: An order-2 allocation can easily fail. If this
195 * happens we fall back to using vmalloc ...
198 struct task_struct *alloc_task_struct(void)
200 struct task_struct *tsk = __get_free_pages(GFP_KERNEL, 2);
202 tsk = vmalloc(16384);
206 atomic_set((atomic_t *)(tsk + 1), 1);
210 void free_task_struct(struct task_struct *tsk)
212 if (atomic_dec_and_test((atomic_t *)(tsk + 1)))
214 if ((unsigned long)tsk < VMALLOC_START)
215 free_pages((unsigned long)tsk, 2);
221 void get_task_struct(struct task_struct *tsk)
223 atomic_inc((atomic_t *)(tsk + 1));
227 asmlinkage int sys_fork(struct pt_regs regs)
229 return do_fork(SIGCHLD, regs.gprs[15], ®s, 0);
232 asmlinkage int sys_clone(struct pt_regs regs)
234 unsigned long clone_flags;
237 clone_flags = regs.gprs[3];
238 newsp = regs.orig_gpr2;
240 newsp = regs.gprs[15];
241 return do_fork(clone_flags, newsp, ®s, 0);
245 * This is trivial, and on the face of it looks like it
246 * could equally well be done in user mode.
248 * Not so, for quite unobvious reasons - register pressure.
249 * In user mode vfork() cannot have a stack frame, and if
250 * done by calling the "clone()" system call directly, you
251 * do not have enough call-clobbered registers to hold all
252 * the information you need.
254 asmlinkage int sys_vfork(struct pt_regs regs)
256 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
257 regs.gprs[15], ®s, 0);
261 * sys_execve() executes a new program.
263 asmlinkage int sys_execve(struct pt_regs regs)
268 filename = getname((char *) regs.orig_gpr2);
269 error = PTR_ERR(filename);
270 if (IS_ERR(filename))
272 error = do_execve(filename, (char **) regs.gprs[3], (char **) regs.gprs[4], ®s);
275 current->ptrace &= ~PT_DTRACE;
276 current->thread.fp_regs.fpc=0;
289 * fill in the FPU structure for a core dump.
291 int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
293 save_fp_regs(fpregs);
298 * fill in the user structure for a core dump..
300 void dump_thread(struct pt_regs * regs, struct user * dump)
303 /* changed the size calculations - should hopefully work better. lbt */
304 dump->magic = CMAGIC;
305 dump->start_code = 0;
306 dump->start_stack = regs->gprs[15] & ~(PAGE_SIZE - 1);
307 dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
308 dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
309 dump->u_dsize -= dump->u_tsize;
311 if (dump->start_stack < TASK_SIZE)
312 dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
313 memcpy(&dump->regs.gprs[0],regs,sizeof(s390_regs));
314 dump_fpu (regs, &dump->regs.fp_regs);
315 memcpy(&dump->regs.per_info,¤t->thread.per_info,sizeof(per_struct));
319 * These bracket the sleeping functions..
321 extern void scheduling_functions_start_here(void);
322 extern void scheduling_functions_end_here(void);
323 #define first_sched ((unsigned long) scheduling_functions_start_here)
324 #define last_sched ((unsigned long) scheduling_functions_end_here)
326 unsigned long get_wchan(struct task_struct *p)
328 unsigned long r14, r15, bc;
329 unsigned long stack_page;
331 if (!p || p == current || p->state == TASK_RUNNING)
333 stack_page = (unsigned long) p;
335 if (!stack_page || r15 < stack_page || r15 >= 16380+stack_page)
337 bc = *(unsigned long *) r15;
339 if (bc < stack_page || bc >= 16380+stack_page)
341 r14 = *(unsigned long *) (bc+112);
342 if (r14 < first_sched || r14 >= last_sched)
344 bc = *(unsigned long *) bc;
345 } while (count++ < 16);