2 * BK Id: SCCS/s.entry.S 1.46 09/02/02 12:29:57 paulus
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
8 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
9 * Adapted for Power Macintosh by Paul Mackerras.
10 * Low-level exception handlers and MMU support
11 * rewritten by Paul Mackerras.
12 * Copyright (C) 1996 Paul Mackerras.
13 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
14 * Adaptations for iSeries Lpar by Mike Corrigan & Dave Boutcher
16 * This file contains the system call entry code, context switch
17 * code, and exception/interrupt return code for PowerPC.
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version
22 * 2 of the License, or (at your option) any later version.
26 #include <linux/config.h>
27 #include <linux/errno.h>
28 #include <linux/sys.h>
29 #include <linux/threads.h>
30 #include <asm/processor.h>
33 #include <asm/cputable.h>
34 #include <asm/ppc_asm.h>
36 #ifdef CONFIG_PPC_ISERIES
37 #include "iSeries_asm.h"
38 #endif /* CONFIG_PPC_ISERIES */
/*
 * System call entry/exit path (fragmentary view: many original lines
 * are elided in this excerpt, so not every branch/label pair that the
 * code references is visible here).
 */
41 #undef SHOW_SYSCALLS_TASK
43 #ifdef SHOW_SYSCALLS_TASK
50 * Handle a system call.
53 .stabs "arch/ppc/kernel/",N_SO,0,0,0f
54 .stabs "entry.S",N_SO,0,0,0f
58 stw r0,THREAD+LAST_SYSCALL(r2) /* record syscall number (r0) in the thread struct */
59 lwz r11,_CCR(r1) /* Clear SO bit in CR */
64 #ifdef SHOW_SYSCALLS_TASK
65 lis r31,show_syscalls_task@ha
66 lwz r31,show_syscalls_task@l(r31)
93 #endif /* SHOW_SYSCALLS */
/* Magic syscall numbers dispatch the sigreturn variants specially */
94 cmpi 0,r0,0x7777 /* Special case for 'sys_sigreturn' */
96 cmpi 0,r0,0x6666 /* Special case for 'sys_rt_sigreturn' */
/* Divert to the traced-syscall slow path below if being syscall-traced */
98 lwz r10,TASK_PTRACE(r2)
99 andi. r10,r10,PT_TRACESYS
101 cmpli 0,r0,NR_syscalls /* unsigned bounds check on the syscall number */
/* r10 = sys_call_table; index by syscall number to fetch the handler */
103 lis r10,sys_call_table@h
104 ori r10,r10,sys_call_table@l
106 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
110 addi r9,r1,STACK_FRAME_OVERHEAD /* r9 -> saved register frame (presumably pt_regs) -- confirm */
111 blrl /* Call handler */
112 .globl ret_from_syscall_1 /* exported return-address symbol for the call above */
114 20: stw r3,RESULT(r1) /* Save result */
116 #ifdef SHOW_SYSCALLS_TASK
/* Handlers return -errno in r3; restart cases are checked here */
131 cmpi 0,r3,ERESTARTNOHAND
134 22: lwz r10,_CCR(r1) /* Set SO bit in CR */
137 30: stw r3,GPR3(r1) /* Update return value */
/* NOTE(review): presumably the 0x7777 (sys_sigreturn) path, parallel
 * to the rt variant below -- the branch to it is elided; confirm. */
142 10: addi r3,r1,STACK_FRAME_OVERHEAD
144 cmpi 0,r3,0 /* Check for restarted system call */
147 /* sys_rt_sigreturn */
148 16: addi r3,r1,STACK_FRAME_OVERHEAD
150 cmpi 0,r3,0 /* Check for restarted system call */
153 /* Traced system call support */
/* Slow path: re-read r0 and re-dispatch after the tracer has run,
 * since the tracer may have altered the syscall number/arguments. */
155 lwz r0,GPR0(r1) /* Restore original registers */
163 cmpli 0,r0,NR_syscalls /* unsigned bounds check, as above */
165 lis r10,sys_call_table@h
166 ori r10,r10,sys_call_table@l
168 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
172 addi r9,r1,STACK_FRAME_OVERHEAD
173 blrl /* Call handler */
174 .globl ret_from_syscall_2
176 stw r3,RESULT(r1) /* Save result */
177 stw r3,GPR0(r1) /* temporary gross hack to make strace work */
182 cmpi 0,r3,ERESTARTNOHAND
185 52: lwz r10,_CCR(r1) /* Set SO bit in CR */
188 60: stw r3,GPR3(r1) /* Update return value */
/* printf-style format strings for the SHOW_SYSCALLS debug output */
194 7: .string "syscall %d(%x, %x, %x, %x, %x, "
195 77: .string "%x, %x), current=%p\n"
196 79: .string " -> %x\n"
201 * This routine switches between two different tasks. The process
202 * state of one is saved on its kernel stack. Then the state
203 * of the other is restored from its kernel stack. The memory
204 * management hardware is updated to the second process's state.
205 * Finally, we can return to the second process.
206 * On entry, r3 points to the THREAD for the current task, r4
207 * points to the THREAD for the new task.
209 * This routine is always called with interrupts disabled
210 * (soft disabled for iSeries).
212 * Note: there are two ways to get to the "going out" portion
213 * of this code; either by coming in via the entry (_switch)
214 * or via "fork" which must set up an environment equivalent
215 * to the "_switch" path. If you change this, you'll have to
216 * change the fork code also.
218 * The code which creates the new task context is in 'copy_thread'
219 * in arch/ppc/kernel/process.c
/*
 * _switch body (fragment -- several lines are elided in this excerpt):
 * save the outgoing task's state on its kernel stack, switch stacks,
 * and resume the incoming task.  Per the routine's header comment,
 * r3 = old task's THREAD, r4 = new task's THREAD.
 */
222 stwu r1,-INT_FRAME_SIZE(r1) /* push a full frame on the old kernel stack */
226 /* r3-r13 are caller saved -- Cort */
230 mflr r20 /* Return to switch caller */
231 stw r20,INT_FRAME_SIZE+4(r1) /* LR save word of the caller's frame (old r1 + 4) */
/* Build a mask of FP/vector MSR bits to decide whether live FP/vec
 * state must be flushed before switching (conditional save elided;
 * r22 presumably holds the saved MSR -- TODO confirm). */
233 li r0,MSR_FP /* Disable floating-point */
234 #ifdef CONFIG_ALTIVEC
236 oris r0,r0,MSR_VEC@h /* Disable altivec */
237 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
238 #endif /* CONFIG_ALTIVEC */
239 and. r0,r0,r22 /* FP or altivec enabled? */
/* The actual stack switch: record old SP, adopt the new task's SP */
255 stw r1,KSP(r3) /* Set old stack pointer */
259 mtspr SPRG3,r0 /* Update current THREAD phys addr */
260 lwz r1,KSP(r4) /* Load new stack pointer */
261 /* save the old current 'last' for return value */
263 addi r2,r4,-THREAD /* Update current */
266 /* r3-r13 are destroyed -- Cort */
271 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
273 addi r1,r1,INT_FRAME_SIZE /* pop the frame pushed at entry */
/* Trace check (presumably the ret_from_fork path -- its label is
 * elided in this excerpt): take the slow path if syscall-traced. */
279 lwz r0,TASK_PTRACE(r2)
280 andi. r0,r0,PT_TRACESYS
284 .globl ret_from_intercept
287 * We may be returning from RTL and cannot do the normal checks
292 .globl ret_from_except
294 #ifdef CONFIG_PPC_ISERIES
295 bl iSeries_check_intr
296 #endif /* CONFIG_PPC_ISERIES */
/* Resched/signal work applies only when going back to user mode */
297 lwz r3,_MSR(r1) /* Returning to user mode? */
299 beq+ do_signal_ret /* if so, check need_resched and signals */
300 lwz r3,NEED_RESCHED(r2)
301 cmpi 0,r3,0 /* check need_resched flag */
304 7: lwz r5,SIGPENDING(r2) /* Check for pending unblocked signals */
308 addi r4,r1,STACK_FRAME_OVERHEAD /* r4 = regs pointer (for a do_signal-style call; call elided) */
312 .globl ret_to_user_hook
/* iSeries soft-disable: re-check for interrupts that arrived while we
 * were soft-disabled before actually returning. */
315 #ifdef CONFIG_PPC_ISERIES
316 mfmsr r0 /* Hard disable */
321 lbz r5,PACAPROCENABLED(r5)
323 bne restore /* skip checks if already soft enabled */
330 CHECKANYINT(r4,r5,r6)
334 mtmsr r0 /* Hard enable */
335 b ret_from_except /* An interrupt came in after we checked above */
336 #endif /* CONFIG_PPC_ISERIES */
345 /* make sure we hard disable here, even if rtl is active, to protect
346 * SRR[01] and SPRG2 -- Cort
/*
 * Return-from-exception tail (fragment): hard-disable interrupts so
 * that SRR0/SRR1 and SPRG2 cannot be clobbered by a new exception
 * while context is being unwound (see the comment above).
 */
348 mfmsr r0 /* Get current interrupt state */
349 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
351 rlwinm r0,r0,0,23,21 /* clear MSR_DE in r0 */
353 SYNC /* Some chip revs have problems here... */
354 mtmsr r0 /* Update machine state */
357 stwcx. r0,0,r1 /* to clear the reservation */
359 /* if returning to user mode, set new sprg2 and save kernel SP */
363 #ifdef CONFIG_ALTIVEC
365 lwz r0,THREAD+THREAD_VRSAVE(r2)
366 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
367 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
368 #endif /* CONFIG_ALTIVEC */
369 #if defined(CONFIG_4xx) && !defined(CONFIG_BDI_SWITCH)
370 /* Restore the processor debugging state of the thread. Only do
371 * this if we aren't using an Abatron BDI JTAG debugger. It doesn't
372 * tolerate others mucking with the debug registers.
374 lwz r0,THREAD+THREAD_DBCR0(r2) /* saved DBCR0 (the mtspr restore is elided) */
/* Record the kernel stack pointer for the next exception entry */
377 addi r0,r1,INT_FRAME_SIZE /* size of frame */
378 stw r0,THREAD+KSP(r2) /* save kernel stack pointer */
379 #ifndef CONFIG_PPC_ISERIES
382 mtspr SPRG2,r8 /* phys exception stack pointer */
383 #else /* CONFIG_PPC_ISERIES */
384 mfspr r2,SPRG1 /* Get Paca address */
385 stw r1,PACAKSAVE(r2) /* save exception stack pointer */
386 #endif /* CONFIG_PPC_ISERIES */
392 #ifdef CONFIG_PPC_ISERIES
393 mfspr r2,SPRG1 /* Get Paca address */
395 stb r0,PACAPROCENABLED(r2) /* Restore soft enabled/disabled */
396 #endif /* CONFIG_PPC_ISERIES */
401 /* We have to "dummy" load from the context save area in case
402 * these instructions cause an MMU fault. If this happens
403 * after we load SRR0/SRR1, our return context is hosed. -- Dan
405 * This workaround is not enough, we must also make sure the
406 * actual code for this routine is in the TLB or BAT mapped.
407 * For 6xx/Power3, we know the code is in a BAT, so this should
408 * be enough in UP. In SMP, I limit lowmem to the amount of
409 * RAM that can be BAT mapped. Other CPUs may need additional
410 * tweaks, especially if SMP is used or if the code for this routine
411 * crosses page boundaries. The TLB pin down for 4xx should help
412 * for example. --BenH.
417 #endif /* ndef CONFIG_SMP */
419 /* We re-use r3,r4 here (the load above was to cause the MMU
420 * fault if necessary). Using r3,r4 removes the need to "dummy"
421 * load the CCR and NIP. Since we load them we may as well
443 * PROM code for specific machines follows. Put it
444 * here so it's easy to add arch-specific sections later.
447 #if defined(CONFIG_ALL_PPC)
449 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
450 * called with the MMU off.
/*
 * RTAS call tail (fragment, CONFIG_ALL_PPC): RTAS must be entered
 * with the MMU off, so each address handed to it -- including the
 * return address -- is converted to physical via -KERNELBASE.
 */
456 lwz r4,rtas_data@l(r4)
457 lis r6,1f@ha /* physical return address for rtas */
459 addis r6,r6,-KERNELBASE@h /* virt -> phys for the return address */
460 subi r7,r1,INT_FRAME_SIZE
461 addis r7,r7,-KERNELBASE@h /* r7 = phys addr just below our frame (presumably a save area for RTAS) -- confirm */
463 lwz r8,rtas_entry@l(r8)
467 ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_FE0|MSR_FE1 /* MSR bits collected here; how they are applied is elided */
469 li r10,MSR_IR|MSR_DR|MSR_FP /* translation + FP bits, for real-mode entry (use elided) */
471 SYNC /* disable interrupts so SRR0/1 */
472 mtmsr r0 /* don't get trashed */
/* RTAS returns here, at the physical address computed above */
479 1: addis r9,r1,-KERNELBASE@h /* r9 = phys addr of our stack frame */
480 lwz r8,20(r9) /* get return address */
481 lwz r9,8(r9) /* original msr value */
487 RFI /* return to caller */
488 #endif /* CONFIG_ALL_PPC */