 * linux/arch/arm/kernel/entry-armv.S
 *
 * Copyright (C) 1996,1997,1998 Russell King.
 * ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 * nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Low-level vector interface routines
 *
 * Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction that
 * causes it to save wrong values... Be aware!

#include <linux/config.h>
#include <asm/memory.h>
#include <asm/vfpmacros.h>
#include <asm/arch/entry-macro.S>

#include "entry-header.S"
 * Interrupt handling. Preserves r7, r8, r9

1:      get_irqnr_and_base r0, r6, r5, lr

        @ routine called with r0 = irq number, r1 = struct pt_regs *

 * this macro assumes that irqstat (r6) and base (r5) are
 * preserved from get_irqnr_and_base above

        test_for_ipi r0, r6, r5, lr

#ifdef CONFIG_LOCAL_TIMERS
        test_for_ltirq r0, r6, r5, lr
 * Invalid mode handlers

        .macro  inv_entry, reason
        sub     sp, sp, #S_FRAME_SIZE

        inv_entry BAD_PREFETCH

        inv_entry BAD_UNDEFINSTR

        @ XXX fall through to common_invalid

@ common_invalid - generic code for failed exception (re-entrant version of handlers)

        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r7, #-1                 @  ""   ""    ""        ""
        str     r4, [sp]                @ save preserved r0
        stmia   r0, {r5 - r7}           @ lr_<exception>,
                                        @ cpsr_<exception>, "old_r0"
        sub     sp, sp, #S_FRAME_SIZE

        add     r5, sp, #S_SP           @ here for interlock avoidance
        mov     r4, #-1                 @  ""  ""      ""       ""
        add     r0, sp, #S_FRAME_SIZE   @  ""  ""      ""       ""
        str     r1, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - spsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @ get ready to re-enable interrupts if appropriate

        biceq   r9, r9, #PSR_I_BIT

        @ Call the processor-specific abort handler:
        @
        @  r2 - aborted context pc
        @  r3 - aborted context cpsr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1. r9 must be preserved.
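        @ (For reference: the r0/r1 values returned here are then passed,
        @ together with a struct pt_regs pointer, to the C routine
        @ do_DataAbort(unsigned long addr, unsigned int fsr,
        @ struct pt_regs *regs) in arch/arm/mm/fault.c.)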
        @ set desired IRQ state, then call main handler

        @ IRQs off again before pulling preserved data off the stack

        @ restore SPSR and restart the instruction

        ldmia   sp, {r0 - pc}^          @ load r0 - pc, cpsr

#ifdef CONFIG_PREEMPT
        ldr     r8, [tsk, #TI_PREEMPT]  @ get preempt count
        add     r7, r8, #1              @ increment it
        str     r7, [tsk, #TI_PREEMPT]

#ifdef CONFIG_PREEMPT
        ldr     r0, [tsk, #TI_FLAGS]    @ get flags
        tst     r0, #_TIF_NEED_RESCHED

        ldr     r0, [tsk, #TI_PREEMPT]  @ read preempt value
        str     r8, [tsk, #TI_PREEMPT]  @ restore preempt count

        strne   r0, [r0, -r0]           @ bug()
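        @ (r0 - r0 is always zero, so when the preempt count check above
        @ fails the strne writes through a NULL pointer on purpose -- a
        @ hand-rolled BUG() that faults immediately.)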
        ldr     r0, [sp, #S_PSR]        @ irqs are already disabled

        ldmia   sp, {r0 - pc}^          @ load r0 - pc, cpsr

#ifdef CONFIG_PREEMPT

        teq     r8, #0                  @ was preempt count = 0
        ldreq   r6, .LCirq_stat

        ldr     r0, [r6, #4]            @ local_irq_count
        ldr     r1, [r6, #8]            @ local_bh_count

        mov     r7, #0                  @ preempt_schedule_irq
        str     r7, [tsk, #TI_PREEMPT]  @ expects preempt_count == 0
1:      bl      preempt_schedule_irq    @ irq en/disable is done inside
        ldr     r0, [tsk, #TI_FLAGS]    @ get new task's TI_FLAGS
        tst     r0, #_TIF_NEED_RESCHED
        beq     preempt_return          @ go again
        @ call emulation code, which returns using r9 if it has emulated
        @ the instruction, or the more conventional lr if we are to treat
        @ this as a real undefined instruction

        mov     r0, sp                  @ struct pt_regs *regs

        @ IRQs off again before pulling preserved data off the stack

        @ restore SPSR and restart the instruction

        ldr     lr, [sp, #S_PSR]        @ Get SVC cpsr

        ldmia   sp, {r0 - pc}^          @ Restore SVC registers

        @ re-enable interrupts if appropriate

        biceq   r9, r9, #PSR_I_BIT

        @ set args, then call main handler
        @
        @  r0 - address of faulting instruction
        @  r1 - pointer to registers on stack

        mov     r0, r2                  @ address (pc)

        bl      do_PrefetchAbort        @ call abort handler

        @ IRQs off again before pulling preserved data off the stack

        @ restore SPSR and restart the instruction

        ldmia   sp, {r0 - pc}^          @ load r0 - pc, cpsr
#ifdef CONFIG_PREEMPT

        sub     sp, sp, #S_FRAME_SIZE

        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r4, #-1                 @  ""  ""     ""        ""

        str     r1, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
        @ make sure our user space atomic helper is aborted
        bichs   r3, r3, #PSR_Z_BIT
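        @ (Clearing Z in the saved psr is what aborts an interrupted
        @ __kuser_cmpxchg; see the "theory of operation" comment in that
        @ helper further below.)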
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - spsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        @ Also, separately save sp_usr and lr_usr

        @ Enable the alignment trap while in kernel mode

        @ Clear FP to mark the first stack frame

        @ Call the processor-specific abort handler:
        @
        @  r2 - aborted context pc
        @  r3 - aborted context cpsr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.

        @ IRQs on, then call the main handler

        adr     lr, ret_from_exception

#ifdef CONFIG_PREEMPT
        ldr     r8, [tsk, #TI_PREEMPT]  @ get preempt count
        add     r7, r8, #1              @ increment it
        str     r7, [tsk, #TI_PREEMPT]

#ifdef CONFIG_PREEMPT
        ldr     r0, [tsk, #TI_PREEMPT]
        str     r8, [tsk, #TI_PREEMPT]

        tst     r3, #PSR_T_BIT          @ Thumb mode?
        bne     fpundefinstr            @ ignore FP

        @ fall through to the emulation code, which returns using r9 if
        @ it has emulated the instruction, or the more conventional lr
        @ if we are to treat this as a real undefined instruction

        adr     r9, ret_from_exception

        @ fallthrough to call_fpe
 * The out-of-line fixup for the ldrt above.

        .section .fixup, "ax"

        .section __ex_table, "a"

 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined. The only instructions that should fault are the
 * co-processor instructions. However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r10 = this thread's thread_info structure.
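 *
 * (For orientation: a generic co-processor instruction carries the CP
 * number in bits 11:8 of the opcode, which is why the code below masks
 * with 0x00000f00 and then indexes used_cp[] by that field shifted
 * right by 8.)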
        tst     r0, #0x08000000         @ only CDP/CPRT/LDC/STC have bit 27
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
        and     r8, r0, #0x0f000000     @ mask out op-code bits
        teqne   r8, #0x0f000000         @ SWI (ARM6/7 bug)?

        get_thread_info r10             @ get current thread
        and     r8, r0, #0x00000f00     @ mask out CP number

        add     r6, r10, #TI_USED_CP
        strb    r7, [r6, r8, lsr #8]    @ set appropriate used_cp[]

        @ Test if we need to give access to iWMMXt coprocessors
        ldr     r5, [r10, #TI_FLAGS]
        rsbs    r7, r8, #(1 << 8)       @ CP 0 or 1 only
        movcss  r7, r5, lsr #(TIF_USING_IWMMXT + 1)
        bcs     iwmmxt_task_enable

        add     pc, pc, r8, lsr #6

        b       do_fpe                  @ CP#1 (FPE)
        b       do_fpe                  @ CP#2 (FPE)

        b       do_vfp                  @ CP#10 (VFP)
        b       do_vfp                  @ CP#11 (VFP)

        mov     pc, lr                  @ CP#10 (VFP)
        mov     pc, lr                  @ CP#11 (VFP)

        mov     pc, lr                  @ CP#14 (Debug)
        mov     pc, lr                  @ CP#15 (Control)

        add     r10, r10, #TI_FPSTATE   @ r10 = workspace
        ldr     pc, [r4]                @ Call FP module USR entry point

 * The FP module is called with these registers set:

 *  r9  = normal "successful" return address

 *  lr  = unrecognised FP instruction return address

        adr     lr, ret_from_exception

        enable_irq                      @ Enable interrupts
        mov     r0, r2                  @ address (pc)

        bl      do_PrefetchAbort        @ call abort handler

 * This is the return code to user mode for abort handlers

ENTRY(ret_from_exception)

 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
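 *
 * (Seen from C this is roughly
 *      struct task_struct *__switch_to(struct task_struct *prev,
 *              struct thread_info *prev_ti, struct thread_info *next_ti);
 * as invoked by the switch_to() macro.)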
        add     ip, r1, #TI_CPU_SAVE
        ldr     r3, [r2, #TI_TP_VALUE]
        stmia   ip!, {r4 - sl, fp, sp, lr}      @ Store most regs on stack

        add     r2, r2, #TI_CPU_DOMAIN

        ldr     r6, [r2, #TI_CPU_DOMAIN]!

#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_MPCORE

        strex   r5, r4, [ip]            @ Clear exclusive monitor

#if defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_IWMMXT)

#if defined(CONFIG_HAS_TLS_REG)
        mcr     p15, 0, r3, c13, c0, 3  @ set TLS register
#elif !defined(CONFIG_TLS_REG_EMUL)

        str     r3, [r4, #-15]          @ TLS val at 0xffff0ff0

        mcr     p15, 0, r6, c3, c0, 0   @ Set domain register

        @ Always disable VFP so we can lazily save/restore the old
        @ state. This occurs in the context of the previous thread.

        bic     r4, r4, #FPEXC_ENABLE

#if defined(CONFIG_IWMMXT)
        bl      iwmmxt_task_switch
#elif defined(CONFIG_CPU_XSCALE)
        add     r4, r2, #40             @ cpu_context_save->extra

        ldmib   r2, {r4 - sl, fp, sp, pc}       @ Load all regs saved previously
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory. They are used to provide user space
 * with some operations which require kernel help because of unimplemented
 * native features and/or instructions in many ARM CPUs. The idea is for
 * this code to be executed directly in user mode for best efficiency, but
 * it is too intimate with the kernel counterpart to be left to user
 * libraries. In fact this code might even differ from one CPU to another
 * depending on the available instruction set and restrictions like on
 * SMP systems. In other words, the kernel reserves the right to change
 * this code as needed without warning. Only the entry points and their
 * results are guaranteed to be stable.
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page. New segments (if ever needed) must be added in front of
 * existing ones. This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
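 *
 * (Concretely, counting down in 32-byte steps from the top of the vector
 * page: get_tls sits at 0xffff0fe0, cmpxchg at 0xffff0fc0 and
 * memory_barrier at 0xffff0fa0, i.e. each newer helper lands 32 bytes
 * below the previous one.)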
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here. In other words, don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purposes.
        .globl  __kuser_helper_start
__kuser_helper_start:

 * Reference prototype:
 *
 *      void __kernel_memory_barrier(void)
 *
 *      lr = return address
 *
 *      the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *      typedef void (__kernel_dmb_t)(void);
 *      #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 * This could be used as follows:
 *
 *      #define __kernel_dmb() \
 *              asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *                      : : : "r0", "lr", "cc" )
__kuser_memory_barrier:                         @ 0xffff0fa0

#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
        mcr     p15, 0, r0, c7, c10, 5  @ dmb

 * Reference prototype:
 *
 *      int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 *      lr = return address
 *
 *      r0 = returned value (zero or non-zero)
 *      C flag = set if r0 == 0, clear if r0 != 0
 *
 * Definition and user space usage example:
 *
 *      typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *      #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Note: this routine already includes memory barriers as needed.
 * For example, a user space atomic_add implementation could look like this:
 *
 *      #define atomic_add(ptr, val) \
 *      ({ register unsigned int *__ptr asm("r2") = (ptr); \
 *         register unsigned int __result asm("r1"); \
 *         asm volatile ( \
 *             "1: @ atomic_add\n\t" \
 *             "ldr  r0, [r2]\n\t" \
 *             "mov  r3, #0xffff0fff\n\t" \
 *             "add  lr, pc, #4\n\t" \
 *             "add  r1, r0, %2\n\t" \
 *             "add  pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *             "bcc  1b" \
 *             : "=&r" (__result) \
 *             : "r" (__ptr), "rIL" (val) \
 *             : "r0","r3","ip","lr","cc","memory" ); \
 *         __result; })
__kuser_cmpxchg:                                @ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

        * Poor you. No fast solution possible...
        * The kernel itself must perform the operation.
        * A special ghost syscall is used for that (see traps.c).

#elif __LINUX_ARM_ARCH__ < 6

 * Theory of operation:
 *
 * We set the Z flag before loading oldval. If ever an exception
 * occurs we cannot be sure the loaded value will still be the same
 * when the exception returns, therefore the user exception handler
 * will clear the Z flag whenever the interrupted user code was
 * actually from the kernel address space (see the usr_entry macro).
 *
 * The pointer writeback on the str is used to prevent a race with an
 * exception happening just after the str instruction which would
 * clear the Z flag although the exchange was done.
        teq     ip, ip                  @ set Z flag
        ldr     ip, [r2]                @ load current val
        add     r3, r2, #1              @ prepare store ptr
        teqeq   ip, r0                  @ compare with oldval if still allowed
        streq   r1, [r3, #-1]!          @ store newval if still allowed
        subs    r0, r2, r3              @ if r2 == r3 the str occurred
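        @ (r3 starts as r2 + 1; the writeback on the conditional str
        @ leaves r3 == r2 only if the store was executed, so the subs
        @ yields 0 and sets C exactly when the exchange happened --
        @ immune to an exception clearing Z between the str and here.)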
        mcr     p15, 0, r0, c7, c10, 5  @ dmb

        mcr     p15, 0, r0, c7, c10, 5  @ dmb

 * Reference prototype:
 *
 *      int __kernel_get_tls(void)
 *
 *      lr = return address
 *
 *      the Z flag might be lost
 *
 * Definition and user space usage example:
 *
 *      typedef int (__kernel_get_tls_t)(void);
 *      #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 * This could be used as follows:
 *
 *      #define __kernel_get_tls() \
 *      ({ register unsigned int __val asm("r0"); \
 *         asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *              : "=r" (__val) : : "lr","cc" ); \
 *         __val; })
__kuser_get_tls:                                @ 0xffff0fe0

#if !defined(CONFIG_HAS_TLS_REG) && !defined(CONFIG_TLS_REG_EMUL)

        ldr     r0, [pc, #(16 - 8)]     @ TLS stored at 0xffff0ff0

        mrc     p15, 0, r0, c13, c0, 3  @ read TLS register

        .word   0                       @ pad up to __kuser_helper_version
 * Reference declaration:
 *
 *      extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *      #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
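 *
 * As an illustrative sketch (not kernel-provided code), a consumer could
 * gate helper usage on this word; get_tls has been available since
 * version 1, so:
 *
 *      unsigned int tls = 0;
 *      if (__kernel_helper_version >= 1)
 *              tls = __kernel_get_tls();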
__kuser_helper_version:                         @ 0xffff0ffc
        .word   ((__kuser_helper_end - __kuser_helper_start) >> 5)

        .globl  __kuser_helper_end
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's. Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode-specific abort handler.
        .macro  vector_stub, name, mode, correction=0

        sub     lr, lr, #\correction

        @ Save r0, lr_<exception> (parent PC) and spsr_<exception>

        stmia   sp, {r0, lr}            @ save r0, lr

        str     lr, [sp, #8]            @ save spsr

        @ Prepare for SVC32 mode. IRQs remain disabled.

        eor     r0, r0, #(\mode ^ SVC_MODE)

        @ the branch table must immediately follow this code

        ldr     lr, [pc, lr, lsl #2]
        movs    pc, lr                  @ branch to handler in SVC mode
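        @ (At this point lr holds the 4-bit mode field of the interrupted
        @ context's psr, so the ldr above indexes the 16-entry table that
        @ follows: entry 0 for USR, 3 for SVC, and so on.)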
 * Interrupt dispatcher

        vector_stub irq, IRQ_MODE, 4

        .long   __irq_usr               @  0  (USR_26 / USR_32)
        .long   __irq_invalid           @  1  (FIQ_26 / FIQ_32)
        .long   __irq_invalid           @  2  (IRQ_26 / IRQ_32)
        .long   __irq_svc               @  3  (SVC_26 / SVC_32)
        .long   __irq_invalid           @  4
        .long   __irq_invalid           @  5
        .long   __irq_invalid           @  6
        .long   __irq_invalid           @  7
        .long   __irq_invalid           @  8
        .long   __irq_invalid           @  9
        .long   __irq_invalid           @  a
        .long   __irq_invalid           @  b
        .long   __irq_invalid           @  c
        .long   __irq_invalid           @  d
        .long   __irq_invalid           @  e
        .long   __irq_invalid           @  f
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC

        vector_stub dabt, ABT_MODE, 8

        .long   __dabt_usr              @  0  (USR_26 / USR_32)
        .long   __dabt_invalid          @  1  (FIQ_26 / FIQ_32)
        .long   __dabt_invalid          @  2  (IRQ_26 / IRQ_32)
        .long   __dabt_svc              @  3  (SVC_26 / SVC_32)
        .long   __dabt_invalid          @  4
        .long   __dabt_invalid          @  5
        .long   __dabt_invalid          @  6
        .long   __dabt_invalid          @  7
        .long   __dabt_invalid          @  8
        .long   __dabt_invalid          @  9
        .long   __dabt_invalid          @  a
        .long   __dabt_invalid          @  b
        .long   __dabt_invalid          @  c
        .long   __dabt_invalid          @  d
        .long   __dabt_invalid          @  e
        .long   __dabt_invalid          @  f
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC

        vector_stub pabt, ABT_MODE, 4

        .long   __pabt_usr              @  0  (USR_26 / USR_32)
        .long   __pabt_invalid          @  1  (FIQ_26 / FIQ_32)
        .long   __pabt_invalid          @  2  (IRQ_26 / IRQ_32)
        .long   __pabt_svc              @  3  (SVC_26 / SVC_32)
        .long   __pabt_invalid          @  4
        .long   __pabt_invalid          @  5
        .long   __pabt_invalid          @  6
        .long   __pabt_invalid          @  7
        .long   __pabt_invalid          @  8
        .long   __pabt_invalid          @  9
        .long   __pabt_invalid          @  a
        .long   __pabt_invalid          @  b
        .long   __pabt_invalid          @  c
        .long   __pabt_invalid          @  d
        .long   __pabt_invalid          @  e
        .long   __pabt_invalid          @  f
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC

        vector_stub und, UND_MODE

        .long   __und_usr               @  0  (USR_26 / USR_32)
        .long   __und_invalid           @  1  (FIQ_26 / FIQ_32)
        .long   __und_invalid           @  2  (IRQ_26 / IRQ_32)
        .long   __und_svc               @  3  (SVC_26 / SVC_32)
        .long   __und_invalid           @  4
        .long   __und_invalid           @  5
        .long   __und_invalid           @  6
        .long   __und_invalid           @  7
        .long   __und_invalid           @  8
        .long   __und_invalid           @  9
        .long   __und_invalid           @  a
        .long   __und_invalid           @  b
        .long   __und_invalid           @  c
        .long   __und_invalid           @  d
        .long   __und_invalid           @  e
        .long   __und_invalid           @  f
/*=============================================================================
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register... brain
 * damage alert! I don't think that we can execute any code in here in any
 * other mode than FIQ... Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).

 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
        .equ    stubs_offset, __vectors_start + 0x200 - __stubs_start
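        @ (Why this works: a branch like "b vector_irq + stubs_offset" is
        @ assembled at its __vectors_start link address but must reach the
        @ stub at its run-time home, 0x200 above the vector page base.
        @ Since B is PC-relative, biasing the target by stubs_offset bakes
        @ in exactly the displacement between the two copied regions.)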
        .globl  __vectors_start

        b       vector_und + stubs_offset
        ldr     pc, .LCvswi + stubs_offset
        b       vector_pabt + stubs_offset
        b       vector_dabt + stubs_offset
        b       vector_addrexcptn + stubs_offset
        b       vector_irq + stubs_offset
        b       vector_fiq + stubs_offset

        .globl  __vectors_end

        .globl  cr_no_alignment