/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 *
 * arch/sh64/kernel/entry.S
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 */
12 #include <linux/config.h>
13 #include <linux/errno.h>
14 #include <linux/sys.h>
16 #include <asm/processor.h>
17 #include <asm/registers.h>
18 #include <asm/unistd.h>
21 * A few defines that ought to come from sched.h referring
22 * to the task structure. Byte offsets within the task
23 * structure and related flags.
27 #define need_resched 20	/* byte offset of need_resched within the task structure (see note above) */
30 #define PT_TRACESYS 0x00000002	/* task flag: syscall tracing enabled (tested in the syscall path below) */
/* Status Register (SR) bit masks, used to build/patch SR/SSR images. */
36 #define SR_ASID_MASK 0x00ff0000	/* address space ID field */
37 #define SR_FD_MASK 0x00008000	/* FPU disable */
38 #define SR_SS 0x08000000	/* single step */
39 #define SR_BL 0x10000000	/* block exceptions */
40 #define SR_MD 0x40000000	/* privileged (kernel) mode */
41 #define SR_MMU 0x80000000	/* MMU enable */
/* Event class codes passed in r4 to the common second-level handler. */
46 #define EVENT_INTERRUPT 0
47 #define EVENT_FAULT_TLB 1
48 #define EVENT_FAULT_NOT_TLB 2
/* Exception cause codes recognised by the RESVEC panic path. */
52 #define RESET_CAUSE 0x20	/* NOTE(review): presumably the reset EXPEVT value -- confirm */
53 #define DEBUGSS_CAUSE 0x980	/* debug single-step cause (matches the 0x980 jump-table slot) */
56 * Frame layout. Quad index.
/* Byte offset of quad-sized slot x in each region of the basic save frame:
 * T = target registers, R = general registers, S = special slots
 * (syscall id / SSR / SPC). */
58 #define FRAME_T(x) FRAME_TBASE+(x*8)
59 #define FRAME_R(x) FRAME_RBASE+(x*8)
60 #define FRAME_S(x) FRAME_SBASE+(x*8)
65 /* Arrange the save frame to be a multiple of 32 bytes long */
/* Region bases chain so the regions pack back-to-back.
 * NOTE(review): the FRAME_SBASE definition is not visible in this extract. */
67 #define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
68 #define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
69 #define FRAME_PBASE (FRAME_TBASE+(8*8)) /* t0 -t7 */
70 #define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
/* FP state frame: 32 double registers plus FPSCR, one quad each. */
72 #define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
73 #define FP_FRAME_BASE 0
83 /* These are the registers saved in the TLB path that aren't saved in the first
84 level of the normal one. */
/* Quad-slot offsets into reg_save_area; the seven first-level slots
 * (SAVED_R2..SAVED_R18, SAVED_T0) occupy slots 0-6 before these. */
85 #define TLB_SAVED_R25 7*8
86 #define TLB_SAVED_T1 8*8
87 #define TLB_SAVED_T2 9*8
88 #define TLB_SAVED_T3 10*8
89 #define TLB_SAVED_T4 11*8
90 /* Save R0/R1 : PT-migrating compiler currently dishounours -ffixed-r0 and -ffixed-r1 causing
91 breakage otherwise. */
92 #define TLB_SAVED_R0 12*8
93 #define TLB_SAVED_R1 13*8
/* Scratch stack for fast tlbmiss handling: 4 cachelines of 4 quads each
 * (8-byte quads, 32-byte lines) = 16 quadwords, filled by .fill below. */
102 #define FAST_TLBMISS_STACK_CACHELINES 4
103 #define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
105 /* Register back-up area for all exceptions */
107 /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
108 * register saves etc. */
109 .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
110 /* This is 32 byte aligned by construction */
111 /* Register back-up area for all exceptions */
131 /* Save area for RESVEC exceptions. We cannot use reg_save_area because of
132 * reentrancy. Note this area may be accessed via physical address.
133 * Align so this fits a whole single cache line, for ease of purging.
144 /* Jump table of 3rd level handlers */
146 .long do_exception_error /* 0x000 */
147 .long do_exception_error /* 0x020 */
148 .long tlb_miss_load /* 0x040 */
149 .long tlb_miss_store /* 0x060 */
150 .long do_exception_error /* 0x080 */
151 .long tlb_miss_load /* 0x0A0 */
152 .long tlb_miss_store /* 0x0C0 */
153 .long do_address_error_load /* 0x0E0 */
154 .long do_address_error_store /* 0x100 */
155 #ifndef CONFIG_NOFPU_SUPPORT
156 .long do_fpu_error /* 0x120 */
158 .long do_exception_error /* 0x120 */
160 .long do_exception_error /* 0x140 */
161 .long system_call /* 0x160 */
162 .long do_reserved_inst /* 0x180 */
163 .long do_illegal_slot_inst /* 0x1A0 */
164 .long do_NMI /* 0x1C0 */
165 .long do_exception_error /* 0x1E0 */
167 .long do_IRQ /* 0x200 - 0x3C0 */
169 .long do_exception_error /* 0x3E0 */
171 .long do_IRQ /* 0x400 - 0x7E0 */
173 .long fpu_error_or_IRQA /* 0x800 */
174 .long fpu_error_or_IRQB /* 0x820 */
175 .long do_IRQ /* 0x840 */
176 .long do_IRQ /* 0x860 */
178 .long do_exception_error /* 0x880 - 0x920 */
180 .long do_software_break_point /* 0x940 */
181 .long do_exception_error /* 0x960 */
182 .long do_single_step /* 0x980 */
185 .long do_exception_error /* 0x9A0 - 0x9E0 */
187 .long do_IRQ /* 0xA00 */
188 .long do_IRQ /* 0xA20 */
189 .long itlb_miss_or_IRQ /* 0xA40 */
190 .long do_IRQ /* 0xA60 */
191 .long do_IRQ /* 0xA80 */
192 .long itlb_miss_or_IRQ /* 0xAA0 */
193 .long do_exception_error /* 0xAC0 */
194 .long do_address_error_exec /* 0xAE0 */
196 .long do_exception_error /* 0xB00 - 0xBE0 */
199 .long do_IRQ /* 0xC00 - 0xE20 */
203 /* System calls jump table */
205 .globl sys_call_table
207 .long sys_ni_syscall /* 0 - old "setup()" system call */
212 .long sys_open /* 5 */
217 .long sys_unlink /* 10 */
222 .long sys_chmod /* 15 */
224 .long sys_ni_syscall /* old break syscall holder */
227 .long sys_getpid /* 20 */
232 .long sys_stime /* 25 */
237 .long sys_utime /* 30 */
238 .long sys_ni_syscall /* old stty syscall holder */
239 .long sys_ni_syscall /* old gtty syscall holder */
242 .long sys_ni_syscall /* 35 */
243 /* old ftime syscall holder */
248 .long sys_rmdir /* 40 */
252 .long sys_ni_syscall /* old prof syscall holder */
253 .long sys_brk /* 45 */
258 .long sys_getegid16 /* 50 */
260 .long sys_umount /* recycled never used phys( */
261 .long sys_ni_syscall /* old lock syscall holder */
263 .long sys_fcntl /* 55 */
264 .long sys_ni_syscall /* old mpx syscall holder */
266 .long sys_ni_syscall /* old ulimit syscall holder */
267 .long sys_ni_syscall /* sys_olduname */
268 .long sys_umask /* 60 */
273 .long sys_getpgrp /* 65 */
278 .long sys_setreuid16 /* 70 */
282 .long sys_sethostname
283 .long sys_setrlimit /* 75 */
284 .long sys_old_getrlimit
286 .long sys_gettimeofday
287 .long sys_settimeofday
288 .long sys_getgroups16 /* 80 */
289 .long sys_setgroups16
290 .long sys_ni_syscall /* sys_oldselect */
293 .long sys_readlink /* 85 */
298 .long old_mmap /* 90 */
303 .long sys_fchown16 /* 95 */
304 .long sys_getpriority
305 .long sys_setpriority
306 .long sys_ni_syscall /* old profil syscall holder */
308 .long sys_fstatfs /* 100 */
309 .long sys_ni_syscall /* ioperm */
310 .long sys_socketcall /* Obsolete implementation of socket syscall */
313 .long sys_getitimer /* 105 */
318 .long sys_ni_syscall /* 110 */ /* iopl */
320 .long sys_ni_syscall /* idle */
321 .long sys_ni_syscall /* vm86old */
323 .long sys_swapoff /* 115 */
325 .long sys_ipc /* Obsolete ipc syscall implementation */
328 .long sys_clone /* 120 */
329 .long sys_setdomainname
331 .long sys_ni_syscall /* sys_modify_ldt */
333 .long sys_mprotect /* 125 */
334 .long sys_sigprocmask
335 .long sys_create_module
336 .long sys_init_module
337 .long sys_delete_module
338 .long sys_get_kernel_syms /* 130 */
343 .long sys_sysfs /* 135 */
344 .long sys_personality
345 .long sys_ni_syscall /* for afs_syscall */
348 .long sys_llseek /* 140 */
353 .long sys_readv /* 145 */
358 .long sys_mlock /* 150 */
362 .long sys_sched_setparam
363 .long sys_sched_getparam /* 155 */
364 .long sys_sched_setscheduler
365 .long sys_sched_getscheduler
366 .long sys_sched_yield
367 .long sys_sched_get_priority_max
368 .long sys_sched_get_priority_min /* 160 */
369 .long sys_sched_rr_get_interval
372 .long sys_setresuid16
373 .long sys_getresuid16 /* 165 */
374 .long sys_ni_syscall /* vm86 */
375 .long sys_query_module
378 .long sys_setresgid16 /* 170 */
379 .long sys_getresgid16
381 .long sys_rt_sigreturn
382 .long sys_rt_sigaction
383 .long sys_rt_sigprocmask /* 175 */
384 .long sys_rt_sigpending
385 .long sys_rt_sigtimedwait
386 .long sys_rt_sigqueueinfo
387 .long sys_rt_sigsuspend
388 .long sys_pread /* 180 */
393 .long sys_capset /* 185 */
394 .long sys_sigaltstack
396 .long sys_ni_syscall /* streams1 */
397 .long sys_ni_syscall /* streams2 */
398 .long sys_vfork /* 190 */
402 .long sys_ftruncate64
403 .long sys_stat64 /* 195 */
408 .long sys_getgid /* 200 */
413 .long sys_getgroups /* 205 */
418 .long sys_setresgid /* 210 */
423 .long sys_setfsuid /* 215 */
428 .long sys_socket /* 220 */
433 .long sys_getsockname /* 225 */
434 .long sys_getpeername
438 .long sys_recv /* 230*/
443 .long sys_sendmsg /* 235 */
445 .long sys_semop /* New ipc syscall implementation */
448 .long sys_msgsnd /* 240 */
453 .long sys_shmdt /* 245 */
458 * NOTE!! This doesn't have to be exact - we just have
459 * to make sure we have _enough_ of the "sys_ni_syscall"
460 * entries. Don't panic if you notice that this hasn't
461 * been shrunk every time we add a new system call.
463 .rept NR_syscalls-247
467 .section .text64, "ax"
470 * --- Exception/Interrupt/Event Handling Section
474 * VBR and RESVEC blocks.
476 * First level handler for VBR-based exceptions.
478 * To avoid waste of space, align to the maximum text block size.
479 * This is assumed to be at most 128 bytes or 32 instructions.
480 * DO NOT EXCEED 32 instructions on the first level handlers !
482 * Also note that RESVEC is contained within the VBR block
483 * where the room left (1KB - TEXT_SIZE) allows placing
484 * the RESVEC block (at most 512B + TEXT_SIZE).
486 * So first (and only) level handler for RESVEC-based exceptions.
488 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
489 * and interrupt) we are a lot tight with register space until
490 * saving onto the stack frame, which is done in handle_exception().
494 #define TEXT_SIZE 128
495 #define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
499 .space 256, 0 /* Power-on class handler, */
500 /* not required here */
502 /* Save original stack pointer into KCR1 */
505 /* Save other original registers into reg_save_area */
506 _loada reg_save_area, SP
507 st.q SP, SAVED_R2, r2
508 st.q SP, SAVED_R3, r3
509 st.q SP, SAVED_R4, r4
510 st.q SP, SAVED_R5, r5
511 st.q SP, SAVED_R6, r6
512 st.q SP, SAVED_R18, r18
514 st.q SP, SAVED_T0, r3
516 #ifdef CONFIG_SH64_PAGE_TABLE_AUDIT
517 /* This use of DCR is not really legal, but there's nowhere else convenient to store this and it's only for last-resort debug anyway. */
522 /* Set args for Non-debug, Not a TLB miss class handler */
524 _loada ret_from_exception, r3
526 movi EVENT_FAULT_NOT_TLB, r4
529 _ptar handle_exception, t0
533 * Instead of the natural .balign 1024 place RESVEC here
534 * respecting the final 1KB alignment.
538 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
539 * block making sure the final alignment is correct.
542 /* Panic handler. Called with MMU off. Possible causes/actions:
543 * - Reset: Jump to program start.
544 * - Single Step: Turn off Single Step & return.
545 * - Others: Call panic handler, passing PC as arg.
546 * (this may need to be extended...)
550 /* First save r0-1 and tr0, as we need to use these */
551 _loada resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
560 sub r1, r0, r1 /* r1=0 if reset */
561 _loada _stext-CONFIG_CACHED_MEMORY_OFFSET, r0
564 beqi r1, 0, t0 /* Jump to start address if reset */
567 movi DEBUGSS_CAUSE, r1
568 sub r1, r0, r1 /* r1=0 if single step */
569 _ptar single_step_panic, t0
570 beqi r1, 0, t0 /* jump if single step */
572 /* If we get here, we have an unknown panic. Just call the panic
573 * handler, passing saved PC. We never expect to return, so we can
574 * use any regs now. */
578 /* Prepare to jump to C - physical address */
579 _loada panic_handler-CONFIG_CACHED_MEMORY_OFFSET, r1
586 /* We are in a handler with Single Step set. We need to resume the
587 * handler, by turning on MMU & turning off Single Step. */
594 /* Restore EXPEVT, as the rte won't do this */
609 * Single step/software_break_point first level handler.
610 * Called with MMU off, so the first thing we do is enable it
611 * by doing an rte with appropriate SSR.
614 /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
615 _loada resvec_save_area-CONFIG_CACHED_MEMORY_OFFSET, SP
617 /* With the MMU off, we are bypassing the cache, so purge any
618 * data that will be made stale by the following stores.
630 /* Enable MMU, block exceptions, set priv mode, disable single step */
631 movi SR_MMU | SR_BL | SR_MD, r1
636 /* Force control to debug_exception_2 when rte is executed */
637 _loada debug_exeception_2, r0
638 ori r0, 1, r0 /* force SHmedia, just in case */
644 /* Restore saved regs */
646 _loada resvec_save_area, SP
654 /* Save other original registers into reg_save_area */
655 _loada reg_save_area, SP
656 st.q SP, SAVED_R2, r2
657 st.q SP, SAVED_R3, r3
658 st.q SP, SAVED_R4, r4
659 st.q SP, SAVED_R5, r5
660 st.q SP, SAVED_R6, r6
661 st.q SP, SAVED_R18, r18
663 st.q SP, SAVED_T0, r3
665 /* Set args for debug class handler */
667 _loada ret_from_exception, r3
672 _ptar handle_exception, t0
678 * Not supported. If we ever get here loop forever
679 * We may be MMUOFF or MMUON. Just use pic code.
681 _ptar debug_interrupt, t0
685 LRESVEC_block_end: /* Marker. Unused. */
688 _loada reg_save_area, SP
689 /* SP is guaranteed 32-byte aligned. */
690 st.q SP, TLB_SAVED_R0 , r0
691 st.q SP, TLB_SAVED_R1 , r1
692 st.q SP, SAVED_R2 , r2
693 st.q SP, SAVED_R3 , r3
694 st.q SP, SAVED_R4 , r4
695 st.q SP, SAVED_R5 , r5
696 st.q SP, SAVED_R6 , r6
697 st.q SP, SAVED_R18, r18
699 /* Save R25 for safety; as/ld may want to use it to achieve the call to
700 * the code in mm/tlbmiss.c */
701 st.q SP, TLB_SAVED_R25, r25
707 st.q SP, SAVED_T0 , r2
708 st.q SP, TLB_SAVED_T1 , r3
709 st.q SP, TLB_SAVED_T2 , r4
710 st.q SP, TLB_SAVED_T3 , r5
711 st.q SP, TLB_SAVED_T4 , r18
713 pt do_fast_page_fault, tr0
718 andi r2, 1, r2 /* r2 = SSR.MD */
721 pt fixup_to_invoke_general_handler, tr1
723 /* If the fast path handler fixed the fault, just drop through quickly
724 to the restore code right away to return to the excepting context.
/* Fast-path exit: the fast page-fault handler fixed the fault, so undo
 * every register save made on TLB-miss entry and resume the interrupted
 * context. SP still points at reg_save_area here. */
728 fast_tlb_miss_restore:
/* Reload the saved target-register images into GPRs first (the moves
 * back into tr0-tr4 are not visible in this extract -- confirm upstream). */
729 ld.q SP, SAVED_T0, r2
730 ld.q SP, TLB_SAVED_T1, r3
731 ld.q SP, TLB_SAVED_T2, r4
733 ld.q SP, TLB_SAVED_T3, r5
734 ld.q SP, TLB_SAVED_T4, r18
/* Then the general registers proper; r2-r18 are reused once their
 * target-register contents have been consumed. */
742 ld.q SP, TLB_SAVED_R0, r0
743 ld.q SP, TLB_SAVED_R1, r1
744 ld.q SP, SAVED_R2, r2
745 ld.q SP, SAVED_R3, r3
746 ld.q SP, SAVED_R4, r4
747 ld.q SP, SAVED_R5, r5
748 ld.q SP, SAVED_R6, r6
749 ld.q SP, SAVED_R18, r18
750 ld.q SP, TLB_SAVED_R25, r25
754 nop /* for safety, in case the code is run on sh5-101 cut1.x */
/* Slow path: the fast tlbmiss handler could not fix the fault. Convert
 * the TLB-path save state into the state the common second-level handler
 * (handle_exception) expects, then fall through to invoking it. */
756 fixup_to_invoke_general_handler:
758 /* OK, new method. Restore stuff that's not expected to get saved into
759 the 'first-level' reg save area, then just fall through to setting
760 up the registers and calling the second-level handler. */
762 /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
763 r25,tr1-4 and save r6 to get into the right state. */
/* Reload the extra TLB-path saves: tr1-tr4 images, r25, then r0/r1.
 * NOTE(review): the moves restoring tr1-tr4 from r3-r18 and the st.q of
 * r6 mentioned above are not visible in this extract -- confirm upstream. */
765 ld.q SP, TLB_SAVED_T1, r3
766 ld.q SP, TLB_SAVED_T2, r4
767 ld.q SP, TLB_SAVED_T3, r5
768 ld.q SP, TLB_SAVED_T4, r18
769 ld.q SP, TLB_SAVED_R25, r25
771 ld.q SP, TLB_SAVED_R0, r0
772 ld.q SP, TLB_SAVED_R1, r1
779 #ifdef CONFIG_SH64_PAGE_TABLE_AUDIT
780 /* This use of DCR is not really legal, but there's nowhere else convenient to store this and it's only for last-resort debug anyway. */
785 /* Set args for Non-debug, TLB miss class handler */
787 _loada ret_from_exception, r3
789 movi EVENT_FAULT_TLB, r4
792 _ptar handle_exception, t0
795 /* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
796 DOES END UP AT VBR+0x600 */
811 /* Save original stack pointer into KCR1 */
814 /* Save other original registers into reg_save_area */
815 _loada reg_save_area, SP
816 st.q SP, SAVED_R2, r2
817 st.q SP, SAVED_R3, r3
818 st.q SP, SAVED_R4, r4
819 st.q SP, SAVED_R5, r5
820 st.q SP, SAVED_R6, r6
821 st.q SP, SAVED_R18, r18
823 st.q SP, SAVED_T0, r3
825 #ifdef CONFIG_SH64_PAGE_TABLE_AUDIT
826 /* This use of DCR is not really legal, but there's nowhere else convenient to store this and it's only for last-resort debug anyway. */
831 /* Set args for interrupt class handler */
833 _loada ret_from_irq, r3
835 movi EVENT_INTERRUPT, r4
838 _ptar handle_exception, t0
840 .balign TEXT_SIZE /* let's waste the bare minimum */
842 LVBR_block_end: /* Marker. Used for total checking */
846 * Second level handler for VBR-based exceptions. Pre-handler.
847 * In common to all stack-frame sensitive handlers.
850 * (KCR0) Current [current task union]
853 * (r3) appropriate return address
854 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
855 * (r5) Pointer to reg_save_area
858 * Available registers:
865 /* Common 2nd level handler. */
867 /* First thing we need an appropriate stack pointer */
872 bne r6, ZERO, t0 /* Original stack pointer is fine */
874 /* Set stack pointer for user fault */
876 movi THREAD_SIZE, r6 /* Point to the end */
880 /* Make some room for the BASIC frame. */
881 movi -(FRAME_SIZE), r6
884 /* Could do this with no stalling if we had another spare register, but the
885 code below will be OK. */
886 ld.q r5, SAVED_R2, r6
887 ld.q r5, SAVED_R3, r18
888 st.q SP, FRAME_R(2), r6
889 ld.q r5, SAVED_R4, r6
890 st.q SP, FRAME_R(3), r18
891 ld.q r5, SAVED_R5, r18
892 st.q SP, FRAME_R(4), r6
893 ld.q r5, SAVED_R6, r6
894 st.q SP, FRAME_R(5), r18
895 ld.q r5, SAVED_R18, r18
896 st.q SP, FRAME_R(6), r6
897 ld.q r5, SAVED_T0, r6
898 st.q SP, FRAME_R(18), r18
899 st.q SP, FRAME_T(0), r6
901 /* Keep old SP around */
904 /* Save the rest of the general purpose registers */
905 st.q SP, FRAME_R(0), r0
906 st.q SP, FRAME_R(1), r1
907 st.q SP, FRAME_R(7), r7
908 st.q SP, FRAME_R(8), r8
909 st.q SP, FRAME_R(9), r9
910 st.q SP, FRAME_R(10), r10
911 st.q SP, FRAME_R(11), r11
912 st.q SP, FRAME_R(12), r12
913 st.q SP, FRAME_R(13), r13
914 st.q SP, FRAME_R(14), r14
916 /* SP is somewhere else */
917 st.q SP, FRAME_R(15), r6
919 st.q SP, FRAME_R(16), r16
920 st.q SP, FRAME_R(17), r17
921 /* r18 is saved earlier. */
922 st.q SP, FRAME_R(19), r19
923 st.q SP, FRAME_R(20), r20
924 st.q SP, FRAME_R(21), r21
925 st.q SP, FRAME_R(22), r22
926 st.q SP, FRAME_R(23), r23
927 st.q SP, FRAME_R(24), r24
928 st.q SP, FRAME_R(25), r25
929 st.q SP, FRAME_R(26), r26
930 st.q SP, FRAME_R(27), r27
931 st.q SP, FRAME_R(28), r28
932 st.q SP, FRAME_R(29), r29
933 st.q SP, FRAME_R(30), r30
934 st.q SP, FRAME_R(31), r31
935 st.q SP, FRAME_R(32), r32
936 st.q SP, FRAME_R(33), r33
937 st.q SP, FRAME_R(34), r34
938 st.q SP, FRAME_R(35), r35
939 st.q SP, FRAME_R(36), r36
940 st.q SP, FRAME_R(37), r37
941 st.q SP, FRAME_R(38), r38
942 st.q SP, FRAME_R(39), r39
943 st.q SP, FRAME_R(40), r40
944 st.q SP, FRAME_R(41), r41
945 st.q SP, FRAME_R(42), r42
946 st.q SP, FRAME_R(43), r43
947 st.q SP, FRAME_R(44), r44
948 st.q SP, FRAME_R(45), r45
949 st.q SP, FRAME_R(46), r46
950 st.q SP, FRAME_R(47), r47
951 st.q SP, FRAME_R(48), r48
952 st.q SP, FRAME_R(49), r49
953 st.q SP, FRAME_R(50), r50
954 st.q SP, FRAME_R(51), r51
955 st.q SP, FRAME_R(52), r52
956 st.q SP, FRAME_R(53), r53
957 st.q SP, FRAME_R(54), r54
958 st.q SP, FRAME_R(55), r55
959 st.q SP, FRAME_R(56), r56
960 st.q SP, FRAME_R(57), r57
961 st.q SP, FRAME_R(58), r58
962 st.q SP, FRAME_R(59), r59
963 st.q SP, FRAME_R(60), r60
964 st.q SP, FRAME_R(61), r61
965 st.q SP, FRAME_R(62), r62
968 * Save the S* registers.
971 st.q SP, FRAME_S(FSSR), r61
973 st.q SP, FRAME_S(FSPC), r62
974 movi -1, r62 /* Reset syscall_nr */
975 st.q SP, FRAME_S(FSYSCALL_ID), r62
977 /* Save the rest of the target registers */
979 st.q SP, FRAME_T(1), r6
981 st.q SP, FRAME_T(2), r6
983 st.q SP, FRAME_T(3), r6
985 st.q SP, FRAME_T(4), r6
987 st.q SP, FRAME_T(5), r6
989 st.q SP, FRAME_T(6), r6
991 st.q SP, FRAME_T(7), r6
993 /*#define POOR_MANS_STRACE 1*/
995 #ifdef POOR_MANS_STRACE
996 /* We've pushed all the registers now, so only r2-r4 hold anything
997 * useful. Move them into callee save registers */
1002 /* Preserve r2 as the event code */
1003 _loada evt_debug, r3
1007 /* or SP, ZERO, r5 */
1017 /* For syscall and debug race condition, get TRA now */
1020 /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
1021 * Also set FD, to catch FPU usage in the kernel.
1023 * benedict.gaster@superh.com 29/07/2002
1025 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
1026 * same time change BL from 1->0, as any pending interrupt of a level
1027 * higher than he previous value of IMASK will leak through and be
1028 * taken unexpectedly.
1030 * To avoid this we raise the IMASK and then issue another PUTCON to
1031 * enable interrupts.
1034 movi SR_IMASK | SR_FD, r7
1037 movi SR_UNBLOCK_EXC, r7
1042 /* Now call the appropriate 3rd level handler */
1044 _loada trap_jtable, r3
1053 * Second level handler for VBR-based exceptions. Post-handlers.
1055 * Post-handlers for interrupts (ret_from_irq), exceptions
1056 * (ret_from_exception) and common reentrance doors (restore_all
1057 * to get back to the original context, ret_from_syscall loop to
1058 * check kernel exiting).
1060 * ret_with_reschedule and check_signals are an inner lables of
1061 * the ret_from_syscall loop.
1063 * In common to all stack-frame sensitive handlers.
1066 * (SP) struct pt_regs *, original register's frame pointer (basic)
1070 ld.q SP, FRAME_S(FSSR), r6
1073 _ptar restore_all, t0
1074 bne r6, ZERO, t0 /* no further checks */
1076 _ptar ret_with_reschedule, t0
1077 blink t0, ZERO /* Do not check softirqs */
1080 ld.q SP, FRAME_S(FSSR), r6
1083 _ptar restore_all, t0
1084 bne r6, ZERO, t0 /* no further checks */
1086 /* Check softirqs */
1090 * _ptar ret_from_syscall, t0
1096 ret_with_reschedule:
1098 ld.l r6, need_resched, r7
1099 _ptar check_signals, t0
1102 _ptar ret_from_syscall, t0
1106 blink t0, ZERO /* Call schedule(), return on top */
1110 ld.l r6, sigpending, r7
1111 _ptar restore_all, t0
1114 _loada do_signal, r6
1118 blink t0, LINK /* Call do_signal(regs, 0), return here */
1120 #ifdef CONFIG_SH64_PAGE_TABLE_AUDIT
1121 /* Check page tables before returning (with obvious performance penalty). */
1134 ld.q SP, FRAME_T(0), r6
1135 ld.q SP, FRAME_T(1), r7
1136 ld.q SP, FRAME_T(2), r8
1137 ld.q SP, FRAME_T(3), r9
1142 ld.q SP, FRAME_T(4), r6
1143 ld.q SP, FRAME_T(5), r7
1144 ld.q SP, FRAME_T(6), r8
1145 ld.q SP, FRAME_T(7), r9
1151 ld.q SP, FRAME_R(0), r0
1152 ld.q SP, FRAME_R(1), r1
1153 ld.q SP, FRAME_R(2), r2
1154 ld.q SP, FRAME_R(3), r3
1155 ld.q SP, FRAME_R(4), r4
1156 ld.q SP, FRAME_R(5), r5
1157 ld.q SP, FRAME_R(6), r6
1158 ld.q SP, FRAME_R(7), r7
1159 ld.q SP, FRAME_R(8), r8
1160 ld.q SP, FRAME_R(9), r9
1161 ld.q SP, FRAME_R(10), r10
1162 ld.q SP, FRAME_R(11), r11
1163 ld.q SP, FRAME_R(12), r12
1164 ld.q SP, FRAME_R(13), r13
1165 ld.q SP, FRAME_R(14), r14
1167 ld.q SP, FRAME_R(16), r16
1168 ld.q SP, FRAME_R(17), r17
1169 ld.q SP, FRAME_R(18), r18
1170 ld.q SP, FRAME_R(19), r19
1171 ld.q SP, FRAME_R(20), r20
1172 ld.q SP, FRAME_R(21), r21
1173 ld.q SP, FRAME_R(22), r22
1174 ld.q SP, FRAME_R(23), r23
1175 ld.q SP, FRAME_R(24), r24
1176 ld.q SP, FRAME_R(25), r25
1177 ld.q SP, FRAME_R(26), r26
1178 ld.q SP, FRAME_R(27), r27
1179 ld.q SP, FRAME_R(28), r28
1180 ld.q SP, FRAME_R(29), r29
1181 ld.q SP, FRAME_R(30), r30
1182 ld.q SP, FRAME_R(31), r31
1183 ld.q SP, FRAME_R(32), r32
1184 ld.q SP, FRAME_R(33), r33
1185 ld.q SP, FRAME_R(34), r34
1186 ld.q SP, FRAME_R(35), r35
1187 ld.q SP, FRAME_R(36), r36
1188 ld.q SP, FRAME_R(37), r37
1189 ld.q SP, FRAME_R(38), r38
1190 ld.q SP, FRAME_R(39), r39
1191 ld.q SP, FRAME_R(40), r40
1192 ld.q SP, FRAME_R(41), r41
1193 ld.q SP, FRAME_R(42), r42
1194 ld.q SP, FRAME_R(43), r43
1195 ld.q SP, FRAME_R(44), r44
1196 ld.q SP, FRAME_R(45), r45
1197 ld.q SP, FRAME_R(46), r46
1198 ld.q SP, FRAME_R(47), r47
1199 ld.q SP, FRAME_R(48), r48
1200 ld.q SP, FRAME_R(49), r49
1201 ld.q SP, FRAME_R(50), r50
1202 ld.q SP, FRAME_R(51), r51
1203 ld.q SP, FRAME_R(52), r52
1204 ld.q SP, FRAME_R(53), r53
1205 ld.q SP, FRAME_R(54), r54
1206 ld.q SP, FRAME_R(55), r55
1207 ld.q SP, FRAME_R(56), r56
1208 ld.q SP, FRAME_R(57), r57
1209 ld.q SP, FRAME_R(58), r58
1212 movi SR_BLOCK_EXC, r60
1214 putcon r59, SR /* SR.BL = 1, keep nesting out */
1215 ld.q SP, FRAME_S(FSSR), r61
1216 ld.q SP, FRAME_S(FSPC), r62
1217 movi SR_ASID_MASK, r60
1219 andc r61, r60, r61 /* Clear out older ASID */
1220 or r59, r61, r61 /* Retain current ASID */
1224 /* Ignore FSYSCALL_ID */
1226 ld.q SP, FRAME_R(59), r59
1227 ld.q SP, FRAME_R(60), r60
1228 ld.q SP, FRAME_R(61), r61
1229 ld.q SP, FRAME_R(62), r62
1232 ld.q SP, FRAME_R(15), SP
1237 * Third level handlers for VBR-based exceptions. Adapting args to
1238 * and/or deflecting to fourth level handlers.
1240 * Fourth level handlers interface.
1241 * Most are C-coded handlers directly pointed by the trap_jtable.
1242 * (Third = Fourth level)
1244 * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
1245 * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1246 * (r3) struct pt_regs *, original register's frame pointer
1247 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1248 * (r5) TRA control register (for syscall/debug benefit only)
1249 * (LINK) return address
1252 * Kernel TLB fault handlers will get a slightly different interface.
1253 * (r2) struct pt_regs *, original register's frame pointer
1254 * (r3) writeaccess, whether it's a store fault as opposed to load fault
1255 * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
1256 * (r5) Effective Address of fault
1257 * (LINK) return address
1260 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1265 or ZERO, ZERO, r3 /* Read */
1266 or ZERO, ZERO, r4 /* Data */
1268 _ptar call_do_page_fault, t0
1273 movi 1, r3 /* Write */
1274 or ZERO, ZERO, r4 /* Data */
1276 _ptar call_do_page_fault, t0
1281 beqi/u r4, EVENT_INTERRUPT, t0
1283 or ZERO, ZERO, r3 /* Read */
1284 movi 1, r4 /* Text */
1289 _loada do_page_fault, r6
1295 beqi/l r4, EVENT_INTERRUPT, t0
1296 #ifndef CONFIG_NOFPU_SUPPORT
1297 _loada do_fpu_state_restore, r6
1299 _loada do_exception_error, r6
1306 beqi/l r4, EVENT_INTERRUPT, t0
1307 #ifndef CONFIG_NOFPU_SUPPORT
1308 _loada do_fpu_state_restore, r6
1310 _loada do_exception_error, r6
1321 * system_call/unknown_trap third level handler:
1324 * (r2) fault/interrupt code, entry number (TRAP = 11)
1325 * (r3) struct pt_regs *, original register's frame pointer
1326 * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1327 * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1329 * (LINK) return address: ret_from_exception
1330 * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1333 * (*r3) Syscall reply (Saved r2)
1334 * (LINK) In case of syscall only it can be scrapped.
1335 * Common second level post handler will be ret_from_syscall.
1336 * Common (non-trace) exit point to that is syscall_ret (saving
1337 * result to r2). Common bad exit point is syscall_bad (returning
1338 * ENOSYS then saved to r2).
1343 /* Unknown Trap or User Trace */
1344 _loada do_unknown_trapa, r6
1346 ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
1347 andi r2, 0x1ff, r2 /* r2 = syscall # */
1350 _ptar syscall_ret, t0
1353 /* New syscall implementation*/
1355 _ptar unknown_trap, t0
1356 or r5, ZERO, r4 /* TRA (=r5) -> r4 */
1358 bnei r4, 1, t0 /* unknown_trap if not 0x1yzzzz */
1360 /* It's a system call */
1361 st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
1362 andi r5, 0x1ff, r5 /* syscall # -> r5 */
1366 _ptar syscall_allowed, t0
1367 movi NR_syscalls - 1, r4 /* Last valid */
1371 /* Return ENOSYS ! */
1372 movi -(ENOSYS), r2 /* Fall-through */
1374 st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
1376 #ifdef POOR_MANS_STRACE
1377 /* nothing useful in registers at this point */
1379 _loada evt_debug2, r5
1382 ld.q SP, FRAME_R(9), r2
1387 ld.q SP, FRAME_S(FSPC), r2
1388 addi r2, 4, r2 /* Move PC, being pre-execution event */
1389 st.q SP, FRAME_S(FSPC), r2
1390 _ptar ret_from_syscall, t0
1394 /* A different return path for ret_from_fork, because we now need
1395 * to call schedule_tail with the later kernels. Because prev is
1396 * loaded into r2 by switch_to() means we can just call it straight away
1399 .global ret_from_fork
1402 _loada schedule_tail,r5
1407 #ifdef POOR_MANS_STRACE
1408 /* nothing useful in registers at this point */
1410 _loada evt_debug2, r5
1413 ld.q SP, FRAME_R(9), r2
1418 ld.q SP, FRAME_S(FSPC), r2
1419 addi r2, 4, r2 /* Move PC, being pre-execution event */
1420 st.q SP, FRAME_S(FSPC), r2
1421 _ptar ret_from_syscall, t0
1427 /* Use LINK to deflect the exit point, default is syscall_ret */
1428 _ptar syscall_ret, t0
1430 _ptar syscall_notrace, t0
1434 andi r4, PT_TRACESYS, r4
1437 /* Trace it by calling syscall_trace before and after */
1438 _loada syscall_trace, r4
1441 /* Reload syscall number as r5 is trashed by syscall_trace */
1442 ld.q SP, FRAME_S(FSYSCALL_ID), r5
1445 _ptar syscall_ret_trace, t0
1449 /* Now point to the appropriate 4th level syscall handler */
1450 _loada sys_call_table, r4
1455 /* Prepare original args */
1456 ld.q SP, FRAME_R(2), r2
1457 ld.q SP, FRAME_R(3), r3
1458 ld.q SP, FRAME_R(4), r4
1459 ld.q SP, FRAME_R(5), r5
1460 ld.q SP, FRAME_R(6), r6
1461 ld.q SP, FRAME_R(7), r7
1463 /* And now the trick for those syscalls requiring regs * ! */
1467 blink t0, ZERO /* LINK is already properly set */
1470 /* We get back here only if under trace */
1471 st.q SP, FRAME_R(9), r2 /* Save return value */
1473 /* ... usage of a pt relative (_ptar _syscall_trace) fails on CDC */
1474 _loada syscall_trace, LINK
1478 /* This needs to be done after any syscall tracing */
1479 ld.q SP, FRAME_S(FSPC), r2
1480 addi r2, 4, r2 /* Move PC, being pre-execution event */
1481 st.q SP, FRAME_S(FSPC), r2
1483 _ptar ret_from_syscall, t0
1484 blink t0, ZERO /* Resume normal return sequence */
1487 * --- Switch to running under a particular ASID and return the previous ASID value
1488 * --- The caller is assumed to have done a cli before calling this.
1490 * Input r2 : new ASID
1491 * Output r2 : old ASID
/* switch_and_save_asid
 * In  : r2 = new ASID
 * Out : r2 = previous ASID
 * Swaps the ASID field of SR; the caller must have disabled interrupts
 * (cli) beforehand -- see the header comment above.
 * NOTE(review): r0 is assumed to hold the current SR image and r4 the
 * value 255 here; the getcon/movi and the final putcon/return are not
 * visible in this extract -- confirm upstream. */
1494 .global switch_and_save_asid
1495 switch_and_save_asid:
1498 shlli r4, 16, r4 /* r4 = mask to select ASID */
1499 and r0, r4, r3 /* r3 = shifted old ASID */
1500 andi r2, 255, r2 /* mask down new ASID */
1501 shlli r2, 16, r2 /* align new ASID against SR.ASID */
1502 andc r0, r4, r0 /* efface old ASID from SR */
1503 or r0, r2, r0 /* insert the new ASID */
1511 shlri r3, 16, r2 /* r2 = old ASID */
1514 .global route_to_panic_handler
1515 route_to_panic_handler:
1516 /* Switch to real mode, goto panic_handler, don't return. Useful for
1517 last-chance debugging, e.g. if no output wants to go to the console.
 */
/* r1 = physical (uncached) address of the C panic handler. */
1520 _loada panic_handler - CONFIG_CACHED_MEMORY_OFFSET, r1
/* NOTE(review): the MMU-off transition (SR patching + rte) between the
 * load above and this real-mode label is not visible in this extract. */
1532 1: /* Now in real mode */
1536 .global peek_real_address_q
1537 peek_real_address_q:
/*
1539 r2 : real mode address to peek
1540 r2(out) : result quadword
1542 This is provided as a cheapskate way of manipulating device
1543 registers for debugging (to avoid the need to onchip_remap the debug
1544 module, and to avoid the need to onchip_remap the watchpoint
1545 controller in a way that identity maps sufficient bits to avoid the
1546 SH5-101 cut2 silicon defect).
1548 This code is not performance critical
*/
1551 add.l r2, r63, r2 /* sign extend address */
1552 getcon sr, r0 /* r0 = saved original SR */
/* Set SR.BL so no exception can be taken while the MMU is off. */
1555 or r0, r1, r1 /* r0 with block bit set */
1556 putcon r1, sr /* now in critical section */
/* Build an SR image with the MMU bit cleared for the real-mode stub. */
1559 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1562 _loada .peek0 - CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
1563 _loada 1f, r37 /* virtual mode return addr */
1570 .peek0: /* come here in real mode, don't touch caches!!
1571 still in critical section (sr.bl==1) */
1574 /* Here's the actual peek. If the address is bad, all bets are now off
1575 * what will happen (handlers invoked in real-mode = bad news) */
/* NOTE(review): the ld.q itself and the SPC/SSR setup for the return
 * are not visible in this extract. */
1578 rte /* Back to virtual mode */
1585 .global poke_real_address_q
1586 poke_real_address_q:
/*
1588 r2 : real mode address to poke
1589 r3 : quadword value to write.
1591 This is provided as a cheapskate way of manipulating device
1592 registers for debugging (to avoid the need to onchip_remap the debug
1593 module, and to avoid the need to onchip_remap the watchpoint
1594 controller in a way that identity maps sufficient bits to avoid the
1595 SH5-101 cut2 silicon defect).
1597 This code is not performance critical
*/
1600 add.l r2, r63, r2 /* sign extend address */
1601 getcon sr, r0 /* r0 = saved original SR */
/* Set SR.BL so no exception can be taken while the MMU is off. */
1604 or r0, r1, r1 /* r0 with block bit set */
1605 putcon r1, sr /* now in critical section */
/* Build an SR image with the MMU bit cleared for the real-mode stub. */
1608 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1611 _loada .poke0-CONFIG_CACHED_MEMORY_OFFSET, r36 /* real mode target address */
1612 _loada 1f, r37 /* virtual mode return addr */
1619 .poke0: /* come here in real mode, don't touch caches!!
1620 still in critical section (sr.bl==1) */
1623 /* Here's the actual poke. If the address is bad, all bets are now off
1624 * what will happen (handlers invoked in real-mode = bad news) */
/* NOTE(review): the st.q itself and the SPC/SSR setup for the return
 * are not visible in this extract. */
1627 rte /* Back to virtual mode */
1635 * --- User Access Handling Section
1639 * User Access support. It has all been moved into non-inlined assembler
1640 * functions in here.
1642 * __kernel_size_t __copy_user(void *__to, const void *__from,
1643 * __kernel_size_t __n)
1646 * (r2) target address
1647 * (r3) source address
1648 * (r4) size in bytes
1652 * (r2) non-copied bytes
1654 * If a fault occurs on the user pointer, bail out early and return the
1655 * number of bytes not copied in r2.
1656 * Strategy : for large blocks, call a real memcpy function which can
1657 * move >1 byte at a time using unaligned ld/st instructions, and can
1658 * manipulate the cache using prefetch + alloco to improve the speed
1659 * further. If a fault occurs in that function, just revert to the
1660 * byte-by-byte approach used for small blocks; this is rare so the
1661 * performance hit for that case does not matter.
1663 * For small blocks it's not worth the overhead of setting up and calling
1664 * the memcpy routine; do the copy a byte at a time.
/*
 * __copy_user (partial view) -- per the header comment above: copies r4
 * bytes from user address r3 to r2, returning the number of non-copied
 * bytes in r2. Large blocks go through copy_user_memcpy (fast path);
 * small blocks, or a fast path that faulted, use the byte-by-byte loop.
 *
 * NOTE(review): incomplete extraction -- the __copy_user entry label,
 * the size compare against r0, the stack-frame setup, the fixup
 * restore instructions and the ___copy_user1/___copy_user2/
 * ___copy_user_exit labels are missing between these lines.
 */
1669 _ptar __copy_user_byte_by_byte, t1
1670 movi 16, r0 ! this value is a best guess, should tune it by benchmarking
1672 _ptar copy_user_memcpy, t0
1674 /* Save arguments in case we have to fix-up unhandled page fault */
1678 st.q r15, 24, r35 ! r35 is callee-save
1679 /* Save LINK in a register to reduce RTS time later (otherwise
1680 ld r15,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
1684 /* Copy completed normally if we get back here */
1687 /* don't restore r2-r4, pointless */
1688 /* set result=r2 to zero as the copy must have succeeded. */
1691 blink tr0, r63 ! RTS
1693 .global __copy_user_fixup
/* __copy_user_fixup: entered via the exception table if the fast
 * memcpy path faults; restores the saved frame and falls through to
 * the slow byte-by-byte copy below. */
1695 /* Restore stack frame */
1702 /* Fall through to original code, in the 'same' state we entered with */
1704 /* The slow byte-by-byte method is used if the fast copy traps due to a bad
1705 user address. In that rare case, the speed drop can be tolerated. */
1706 __copy_user_byte_by_byte:
1707 _ptar ___copy_user_exit, t1
1708 _ptar ___copy_user1, t0
1709 beq/u r4, r63, t1 /* early exit for zero length copy */
! NOTE(review): the ___copy_user1 loop-head label is missing before this
! load (the exception table pairs ___copy_user1 with ___copy_user_exit).
1714 ld.b r3, 0, r5 /* Fault address 1 */
1716 /* Could rewrite this to use just 1 add, but the second comes 'free'
1717 due to load latency */
1719 addi r4, -1, r4 /* No real fixup required */
1721 stx.b r3, r0, r5 /* Fault address 2 */
! NOTE(review): the loop back-branch and the ___copy_user_exit return
! sequence are missing after this point.
1730 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1733 * (r2) target address
1734 * (r3) size in bytes
1737 * (*r2) zero-ed target data
1738 * (r2) non-zero-ed bytes
/*
 * __clear_user -- per the header comment above: zeroes r3 bytes at
 * user address r2; r2 returns the count of bytes NOT zeroed (faulting
 * stores bail out through the exception table).
 *
 * NOTE(review): incomplete extraction -- the __clear_user entry label,
 * the zero-length early exit, the ___clear_user1 loop label, the loop
 * branch and the ___clear_user_exit return sequence are missing.
 */
1740 .global __clear_user
1742 _ptar ___clear_user_exit, t1
1743 _ptar ___clear_user1, t0
1747 st.b r2, 0, ZERO /* Fault address */
1749 addi r3, -1, r3 /* No real fixup required */
1759 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1763 * (r2) target address
1764 * (r3) source address
1765 * (r4) maximum size in bytes
1769 * (r2) -EFAULT (in case of faulting)
1770 * copied data (otherwise)
/*
 * __strncpy_from_user -- per the header comment above: copy at most r4
 * bytes of a string from user address r3 to r2; r2 returns the number
 * of copied bytes, or -EFAULT if the user-space read faults.
 *
 * NOTE(review): incomplete extraction -- the __strncpy_from_user store
 * of the copied byte, the NUL-terminator test, the loop back-branch
 * and the final move of r6 into r2 plus return are missing between
 * these lines.
 */
1772 .global __strncpy_from_user
1773 __strncpy_from_user:
1774 _ptar ___strncpy_from_user1, t0
1775 _ptar ___strncpy_from_user_done, t1
1776 or r4, ZERO, r5 /* r5 = original count */
1777 beq/u r4, r63, t1 /* early exit if r4==0 */
1778 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1779 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1781 ___strncpy_from_user1:
1782 ld.b r3, 0, r7 /* Fault address: only in reading */
! NOTE(review): the store to the destination and the terminator test
! are missing from this view.
1787 addi r4, -1, r4 /* return real number of copied bytes */
1790 ___strncpy_from_user_done:
1791 sub r5, r4, r6 /* If done, return copied */
1793 ___strncpy_from_user_exit:
! NOTE(review): the move of r6 into r2 and the return sequence are
! missing after this label.
1799 * extern long __strnlen_user(const char *__s, long __n)
1802 * (r2) source address
1803 * (r3) source size in bytes
1806 * (r2) -EFAULT (in case of faulting)
1807 * string length (otherwise)
/*
 * __strnlen_user -- per the header comment above: length of the
 * user-space string at r2, scanning at most r3 bytes; r2 returns the
 * count, or -EFAULT if the user-space read faults.
 *
 * NOTE(review): incomplete extraction -- the __strnlen_user entry
 * label, the ___strnlen_user1 label (paired in the exception table),
 * the NUL test, counter increment, loop branch and final return
 * sequence are missing between these lines.
 */
1809 .global __strnlen_user
1811 _ptar ___strnlen_user_set_reply, t0
1812 _ptar ___strnlen_user1, t1
1813 or ZERO, ZERO, r5 /* r5 = counter */
1814 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1815 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1819 ldx.b r2, r5, r7 /* Fault address: only in reading */
1820 addi r3, -1, r3 /* No real fixup */
1824 ! The line below used to be active. This led to a junk byte lying between each pair
1825 ! of entries in the argv & envp structures in memory. Whilst the program saw the right data
1826 ! via the argv and envp arguments to main, it meant the 'flat' representation visible through
1827 ! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
1828 ! addi r5, 1, r5 /* Include '\0' */
1830 ___strnlen_user_set_reply:
1831 or r5, ZERO, r6 /* If done, return counter */
1833 ___strnlen_user_exit:
! NOTE(review): the move of r6 into r2 and the return sequence are
! missing after this label.
1839 * extern long __get_user_asm_?(void *val, long addr)
1843 * (r3) source address (in User Space)
1846 * (r2) -EFAULT (faulting)
/*
 * __get_user_asm_{b,w,l,q} -- per the header comment above: fetch one
 * 1/2/4/8-byte value from the user address in r3. r2 is preloaded with
 * -EFAULT so that a faulting load (routed via the exception table to
 * the matching ___get_user_asm_?_exit) returns the error.
 *
 * NOTE(review): incomplete extraction -- the entry labels, the
 * ___get_user_asm_?1 fault labels, the store of r5 through the val
 * pointer (presumably, per the prototype above -- confirm), the
 * success status set-up and the return sequences are all missing
 * between these lines.
 */
1849 .global __get_user_asm_b
1852 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1855 ld.b r3, 0, r5 /* r5 = data */
1859 ___get_user_asm_b_exit:
1864 .global __get_user_asm_w
1867 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1870 ld.w r3, 0, r5 /* r5 = data */
1874 ___get_user_asm_w_exit:
1879 .global __get_user_asm_l
1882 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1885 ld.l r3, 0, r5 /* r5 = data */
1889 ___get_user_asm_l_exit:
1894 .global __get_user_asm_q
1897 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1900 ld.q r3, 0, r5 /* r5 = data */
1904 ___get_user_asm_q_exit:
1909 * extern long __put_user_asm_?(void *pval, long addr)
1912 * (r2) kernel pointer to value
1913 * (r3) dest address (in User Space)
1916 * (r2) -EFAULT (faulting)
/*
 * __put_user_asm_{b,w,l,q} -- per the header comment above: read one
 * 1/2/4/8-byte value through the kernel pointer in r2, to be stored at
 * the user address in r3. r2 is then preloaded with -EFAULT so that a
 * faulting user store (routed via the exception table to the matching
 * ___put_user_asm_?_exit) returns the error.
 *
 * NOTE(review): incomplete extraction -- the entry labels, the
 * ___put_user_asm_?1 fault labels, the user-space stores of r4, the
 * success status set-up and the return sequences are all missing
 * between these lines.
 */
1919 .global __put_user_asm_b
1921 ld.b r2, 0, r4 /* r4 = data */
1922 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1928 ___put_user_asm_b_exit:
1933 .global __put_user_asm_w
1935 ld.w r2, 0, r4 /* r4 = data */
1936 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1942 ___put_user_asm_w_exit:
1947 .global __put_user_asm_l
1949 ld.l r2, 0, r4 /* r4 = data */
1950 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1956 ___put_user_asm_l_exit:
1961 .global __put_user_asm_q
1963 ld.q r2, 0, r4 /* r4 = data */
1964 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1970 ___put_user_asm_q_exit:
1976 * --- Signal Handling Section
1980 * extern long long _sa_default_rt_restorer
1981 * extern long long _sa_default_restorer
1985 * extern void _sa_default_rt_restorer(void)
1986 * extern void _sa_default_restorer(void)
1988 * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
1989 * from user space. Copied into user space by signal management.
1990 * Both must be quad aligned and 2 quad long (4 instructions).
/*
 * Default signal restorers -- per the header comment above, these are
 * copied into user space by signal management and must be quad aligned
 * and two quads (4 instructions) long. Each loads a sigreturn syscall
 * number into r9.
 *
 * NOTE(review): incomplete extraction -- the movi high-part preceding
 * each shori and the trap/nop instructions that actually issue the
 * syscall are missing, so only 1 of the 4 instructions of each
 * trampoline is visible here.
 */
1994 .global sa_default_rt_restorer
1995 sa_default_rt_restorer:
1997 shori __NR_rt_sigreturn, r9
2002 .global sa_default_restorer
2003 sa_default_restorer:
2005 shori __NR_sigreturn, r9
2010 * --- __ex_table Section
2014 * User Access Exception Table.
/*
 * User Access Exception Table: each entry pairs a potentially-faulting
 * user-access instruction address (first .long) with its fixup/exit
 * label (second .long), covering the uaccess routines above. The
 * asm_uaccess_start/end symbols merely bracket the table.
 */
2016 .section __ex_table, "a"
2018 .global asm_uaccess_start /* Just a marker */
2021 .long ___copy_user1, ___copy_user_exit
2022 .long ___copy_user2, ___copy_user_exit
2023 .long ___clear_user1, ___clear_user_exit
2024 .long ___strncpy_from_user1, ___strncpy_from_user_exit
2025 .long ___strnlen_user1, ___strnlen_user_exit
2026 .long ___get_user_asm_b1, ___get_user_asm_b_exit
2027 .long ___get_user_asm_w1, ___get_user_asm_w_exit
2028 .long ___get_user_asm_l1, ___get_user_asm_l_exit
2029 .long ___get_user_asm_q1, ___get_user_asm_q_exit
2030 .long ___put_user_asm_b1, ___put_user_asm_b_exit
2031 .long ___put_user_asm_w1, ___put_user_asm_w_exit
2032 .long ___put_user_asm_l1, ___put_user_asm_l_exit
2033 .long ___put_user_asm_q1, ___put_user_asm_q_exit
2035 .global asm_uaccess_end /* Just a marker */
2042 * --- .text.init Section
2045 .section .text.init, "ax"
2048 * void trap_init (void)
2053 addi SP, -24, SP /* Room to save r28/r29/r30 */
2058 /* Set VBR and RESVEC */
2059 _loada LVBR_block, r19
2060 andi r19, -4, r19 /* reset MMUOFF + reserved */
2061 /* For RESVEC exceptions we force the MMU off, which means we need the
2062 physical address. */
2063 _loada LRESVEC_block-CONFIG_CACHED_MEMORY_OFFSET, r20
2064 andi r20, -4, r20 /* reset reserved */
2065 ori r20, 1, r20 /* set MMUOFF */
2070 _loada LVBR_block_end, r21
2072 movi BLOCK_SIZE, r29 /* r29 = expected size */
2077 * Ugly, but better loop forever now than crash afterwards.
2078 * We should print a message, but if we touch LVBR or
2079 * LRESVEC blocks we should not be surprised if we get stuck
2082 _ptar trap_init_loop, t1
2083 gettr t1, r28 /* r28 = trap_init_loop */
2084 sub r21, r30, r30 /* r30 = actual size */
2087 * VBR/RESVEC handlers overlap by being bigger than
2088 * allowed. Very bad. Just loop forever.
2089 * (r28) panic/loop address
2090 * (r29) expected size
2096 /* Now that exception vectors are set up reset SR.BL */
2098 movi SR_UNBLOCK_EXC, r23