2 * BK Id: SCCS/s.head.S 1.78 12/18/02 00:17:27 benh
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
9 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
10 * Adapted for Power Macintosh by Paul Mackerras.
11 * Low-level exception handlers and MMU support
12 * rewritten by Paul Mackerras.
13 * Copyright (C) 1996 Paul Mackerras.
14 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
15 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
17 * This file contains the low-level support and setup for the
18 * PowerPC platform, including trap and interrupt dispatch.
19 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
21 * This program is free software; you can redistribute it and/or
22 * modify it under the terms of the GNU General Public License
23 * as published by the Free Software Foundation; either version
24 * 2 of the License, or (at your option) any later version.
28 #include <linux/config.h>
29 #include <linux/threads.h>
30 #include <asm/processor.h>
33 #include <asm/pgtable.h>
34 #include <asm/cputable.h>
35 #include <asm/cache.h>
36 #include <asm/ppc_asm.h>
40 #include <asm/amigappc.h>
/*
 * NOTE(review): this is a sampled excerpt of head.S -- the original-file
 * line numbers at the start of each line jump, so some lines of each
 * definition are missing from this view.
 *
 * LOAD_BAT(n, reg, RA, RB): load BAT (Block Address Translation) pair n
 * -- both the instruction and data BAT upper/lower SPRs -- from a table
 * pointed to by `reg`, using RA and RB as scratch registers.
 */
43 #ifdef CONFIG_PPC64BRIDGE
/* 64-bit bridge mode: BAT values are 64-bit, 8 bytes apart, 32 bytes per entry. */
44 #define LOAD_BAT(n, reg, RA, RB) \
45 ld RA,(n*32)+0(reg); \
46 ld RB,(n*32)+8(reg); \
47 mtspr IBAT##n##U,RA; \
48 mtspr IBAT##n##L,RB; \
49 ld RA,(n*32)+16(reg); \
50 ld RB,(n*32)+24(reg); \
51 mtspr DBAT##n##U,RA; \
52 mtspr DBAT##n##L,RB; \
54 #else /* CONFIG_PPC64BRIDGE */
56 /* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
/*
 * NOTE(review): the first two mtsprs below appear to write RA to the
 * upper BAT registers before RA is (re)loaded from the table -- this
 * presumably unmaps the BAT first to avoid a transient inconsistent
 * mapping, and assumes RA was zeroed on a preceding line that is not
 * visible in this excerpt. Verify against the full source.
 */
57 #define LOAD_BAT(n, reg, RA, RB) \
58 /* see the comment for clear_bats() -- Cort */ \
60 mtspr IBAT##n##U,RA; \
61 mtspr DBAT##n##U,RA; \
62 lwz RA,(n*16)+0(reg); \
63 lwz RB,(n*16)+4(reg); \
64 mtspr IBAT##n##U,RA; \
65 mtspr IBAT##n##L,RB; \
67 lwz RA,(n*16)+8(reg); \
68 lwz RB,(n*16)+12(reg); \
69 mtspr DBAT##n##U,RA; \
70 mtspr DBAT##n##L,RB; \
72 #endif /* CONFIG_PPC64BRIDGE */
/*
 * NOTE(review): sampled excerpt -- boot-time entry path.  Visible here:
 * the legacy XCOFF/COFF entry nops, the primary boot sequence (save
 * boot-loader parameters, early_init, MMU-off + BAT setup, relocation
 * check, RFI into start_here with translation enabled), the
 * __secondary_hold spin loop for secondary CPUs, and the
 * EXCEPTION_PROLOG / STD_EXCEPTION macros used by the vectors below.
 * Many original lines are missing between the numbered fragments; do
 * not assume adjacent lines were contiguous in the real file.
 */
75 .stabs "arch/ppc/kernel/",N_SO,0,0,0f
76 .stabs "head.S",N_SO,0,0,0f
82 * _start is defined this way because the XCOFF loader in the OpenFirmware
83 * on the powermac expects the entry point to be a procedure descriptor.
89 * These are here for legacy reasons, the kernel used to
90 * need to look like a coff function entry for the pmac
91 * but we're always started by some kind of bootloader now.
94 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
95 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
99 * Enter here with the kernel text, data and bss loaded starting at
100 * 0, running with virtual == physical mapping.
101 * r5 points to the prom entry point (the client interface handler
102 * address). Address translation is turned on, with the prom
103 * managing the hash table. Interrupts are disabled. The stack
104 * pointer (r1) points to just below the end of the half-meg region
105 * from 0x380000 - 0x400000, which is mapped in already.
107 * If we are booted from MacOS via BootX, we enter with the kernel
108 * image loaded somewhere, and the following values in registers:
109 * r3: 'BooX' (0x426f6f58)
110 * r4: virtual address of boot_infos_t
115 * r4: physical address of memory base
116 * Linux/m68k style BootInfo structure at &_end.
119 * This is jumped to on prep systems right after the kernel is relocated
120 * to its proper place in memory by the boot loader. The expected layout
122 * r3: ptr to residual data
123 * r4: initrd_start or if no initrd then 0
124 * r5: initrd_end - unused if r4 is 0
125 * r6: Start of command line string
126 * r7: End of command line string
128 * This just gets a minimal mmu environment setup so we can call
129 * start_here() to do the real work.
136 * We have to do any OF calls before we map ourselves to KERNELBASE,
137 * because OF may have I/O devices mapped into that area
138 * (particularly on CHRP).
140 mr r31,r3 /* save parameters */
147 * early_init() does the early machine identification and does
148 * the necessary low-level setup and clears the BSS
149 * -- Cort <cort@fsmlabs.com>
154 /* On APUS the __va/__pa constants need to be set to the correct
155 * values before continuing.
159 #endif /* CONFIG_APUS */
161 /* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
162 * the physical address we are running at, returned by early_init()
170 #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
175 * Call setup_cpu for CPU 0
179 bl call_setup_cpu /* Call setup_cpu for this CPU */
183 #endif /* CONFIG_6xx */
187 * We need to run with _start at physical address 0.
188 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
189 * the exception vectors at 0 (and therefore this copy
190 * overwrites OF's exception vectors with our own).
191 * If the MMU is already turned on, we copy stuff to KERNELBASE,
192 * otherwise we copy it to 0.
196 addis r4,r3,KERNELBASE@h /* current address of _start */
197 cmpwi 0,r4,0 /* are we already running at 0? */
199 #endif /* CONFIG_APUS */
201 * we now have the 1st 16M of ram mapped with the bats.
202 * prep needs the mmu to be turned on here, but pmac already has it on.
203 * this shouldn't bother the pmac since it just gets turned on again
204 * as we jump to our code at KERNELBASE. -- Cort
205 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
206 * off, and in other cases, we now turn it off before changing BATs above.
210 ori r0,r0,MSR_DR|MSR_IR
213 ori r0,r0,start_here@l
216 RFI /* enables MMU */
219 * We need __secondary_hold as a place to hold the other cpus on
220 * an SMP machine, even when we are running a UP kernel.
222 . = 0xc0 /* for prep bootloader */
223 li r3,1 /* MTX only has 1 cpu */
224 .globl __secondary_hold
226 /* tell the master we're here */
230 /* wait until we're told to start */
233 /* our cpu # was at addr 0 - go */
234 mr r24,r3 /* cpu # */
238 #endif /* CONFIG_SMP */
241 * Exception entry code. This code runs with address translation
242 * turned off, i.e. using physical addresses.
243 * We assume sprg3 has the physical address of the current
244 * task's thread_struct.
246 #define EXCEPTION_PROLOG \
250 mfspr r21,SPRG2; /* exception stack to use from */ \
251 cmpwi 0,r21,0; /* user mode or RTAS */ \
253 tophys(r21,r1); /* use tophys(kernel sp) otherwise */ \
254 subi r21,r21,INT_FRAME_SIZE; /* alloc exc. frame */\
256 stw r20,_CCR(r21); /* save registers */ \
257 stw r22,GPR22(r21); \
258 stw r23,GPR23(r21); \
260 stw r20,GPR20(r21); \
262 stw r22,GPR21(r21); \
264 stw r20,_LINK(r21); \
275 tovirt(r1,r21); /* set new kernel sp */ \
276 SAVE_4GPRS(3, r21); \
279 * Note: code which follows this uses cr0.eq (set if from kernel),
280 * r21, r22 (SRR0), and r23 (SRR1).
286 #define STD_EXCEPTION(n, label, hdlr) \
290 addi r3,r1,STACK_FRAME_OVERHEAD; \
292 bl transfer_to_handler; \
295 .long ret_from_except
/*
 * NOTE(review): sampled excerpt -- primary exception vectors (0x100
 * reset / 0x200 machine check through the 0xf20 AltiVec-unavailable
 * remap).  Each vector saves state (the EXCEPTION_PROLOG invocations
 * are among the lines missing from this excerpt), then branches to
 * transfer_to_handler; the two .long words that follow each `bl` are
 * the virtual handler address and the return path.
 */
298 /* core99 pmac starts the secondary here by changing the vector, and
299 putting it back to what it was (UnknownException) when done. */
300 #if defined(CONFIG_GEMINI) && defined(CONFIG_SMP)
302 b __secondary_start_gemini
304 STD_EXCEPTION(0x100, Reset, UnknownException)
311 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
312 STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)
314 /* Data access exception. */
316 #ifdef CONFIG_PPC64BRIDGE
322 #endif /* CONFIG_PPC64BRIDGE */
325 andis. r0,r20,0xa470 /* weird error? */
326 bne 1f /* if not, try to put a PTE */
327 mfspr r4,DAR /* into the hash table */
328 rlwinm r3,r20,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
330 END_FTR_SECTION_IFSET(CPU_FTR_HPTE_TABLE)
331 1: stw r20,_DSISR(r21)
335 addi r3,r1,STACK_FRAME_OVERHEAD
337 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
338 bl transfer_to_handler
341 .long ret_from_except
343 #ifdef CONFIG_PPC64BRIDGE
344 /* SLB fault on data access. */
350 addi r3,r1,STACK_FRAME_OVERHEAD
352 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
353 bl transfer_to_handler
354 .long UnknownException
355 .long ret_from_except
356 #endif /* CONFIG_PPC64BRIDGE */
358 /* Instruction access exception. */
360 #ifdef CONFIG_PPC64BRIDGE
362 InstructionAccessCont:
366 #endif /* CONFIG_PPC64BRIDGE */
368 andis. r0,r23,0x4000 /* no pte found? */
369 beq 1f /* if so, try to put a PTE */
370 li r3,0 /* into the hash table */
371 mr r4,r22 /* SRR0 is fault address */
373 END_FTR_SECTION_IFSET(CPU_FTR_HPTE_TABLE)
374 1: addi r3,r1,STACK_FRAME_OVERHEAD
378 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
379 bl transfer_to_handler
382 .long ret_from_except
384 #ifdef CONFIG_PPC64BRIDGE
385 /* SLB fault on instruction access. */
388 InstructionSegmentCont:
389 addi r3,r1,STACK_FRAME_OVERHEAD
391 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
392 bl transfer_to_handler
393 .long UnknownException
394 .long ret_from_except
395 #endif /* CONFIG_PPC64BRIDGE */
397 /* External interrupt */
401 addi r3,r1,STACK_FRAME_OVERHEAD
404 bl transfer_to_handler
405 .globl do_IRQ_intercept
408 .long ret_from_intercept
410 /* Alignment exception */
418 addi r3,r1,STACK_FRAME_OVERHEAD
420 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
421 bl transfer_to_handler
423 .long AlignmentException
424 .long ret_from_except
426 /* Program check exception */
430 addi r3,r1,STACK_FRAME_OVERHEAD
432 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
433 bl transfer_to_handler
435 .long ProgramCheckException
436 .long ret_from_except
438 /* Floating-point unavailable */
442 bne load_up_fpu /* if from user, just load it up */
444 bl transfer_to_handler /* if from kernel, take a trap */
447 .long ret_from_except
452 addi r3,r1,STACK_FRAME_OVERHEAD
454 bl transfer_to_handler
455 .globl timer_interrupt_intercept
456 timer_interrupt_intercept:
457 .long timer_interrupt
458 .long ret_from_intercept
460 STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
461 STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
467 stw r3,ORIG_GPR3(r21)
469 rlwimi r20,r23,0,16,16 /* copy EE bit from saved MSR */
470 bl transfer_to_handler
472 .long ret_from_except
474 /* Single step - not used on 601 */
475 STD_EXCEPTION(0xd00, SingleStep, SingleStepException)
476 STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
479 * The Altivec unavailable trap is at 0x0f20. Foo.
480 * We effectively remap it to 0x3000.
485 addi r3,r1,STACK_FRAME_OVERHEAD
487 bl transfer_to_handler
488 .long UnknownException
489 .long ret_from_except
492 #ifdef CONFIG_ALTIVEC
/*
 * NOTE(review): sampled excerpt -- software TLB reload handlers for the
 * 603/603e, which has no hardware page-table walk.  Three handlers:
 * instruction miss, data-load miss, data-store miss.  Each walks the
 * two-level Linux page table (swapper_pg_dir for kernel addresses),
 * checks permissions against the Linux pte bits, sets _PAGE_ACCESSED
 * (and _PAGE_DIRTY for stores), converts the Linux pte into a PPC
 * hardware pte, and returns.  Failed lookups fall through to the
 * {Instruction,Data}AddressInvalid paths which fake up SRR1/DSISR/DAR
 * and reflect the fault to the normal access-fault vectors.
 */
500 * Handle TLB miss for instruction on 603/603e.
501 * Note: we get an alternate set of r0 - r3 to use automatically.
507 * r1: linux style pte ( later becomes ppc hardware pte )
508 * r2: ptr to linux-style pte
512 /* Get PTE (linux-style) and check access */
514 lis r1,KERNELBASE@h /* check if kernel address */
517 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
520 lis r2,swapper_pg_dir@ha /* if kernel address, use */
521 addi r2,r2,swapper_pg_dir@l /* kernel page table */
522 mfspr r1,SRR1 /* and MSR_PR bit from SRR1 */
523 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
525 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
526 lwz r2,0(r2) /* get pmd entry */
527 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
528 beq- InstructionAddressInvalid /* return if no mapping */
530 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
531 lwz r3,0(r2) /* get linux-style pte */
532 andc. r1,r1,r3 /* check access & ~permission */
533 bne- InstructionAddressInvalid /* return if access not permitted */
534 ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */
536 * NOTE! We are assuming this is not an SMP system, otherwise
537 * we would need to update the pte atomically with lwarx/stwcx.
539 stw r3,0(r2) /* update PTE (accessed bit) */
540 /* Convert linux-style PTE to low word of PPC-style PTE */
541 rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */
542 rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */
543 and r1,r1,r2 /* writable if _RW and _DIRTY */
544 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
545 rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
546 ori r1,r1,0xe14 /* clear out reserved bits and M */
547 andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
551 mfspr r3,SRR1 /* Need to restore CR0 */
554 InstructionAddressInvalid:
556 rlwinm r1,r3,9,6,6 /* Get load/store bit */
559 mtspr DSISR,r1 /* (shouldn't be needed) */
560 mtctr r0 /* Restore CTR */
561 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
564 mfspr r1,IMISS /* Get failing address */
565 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
566 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
568 mtspr DAR,r1 /* Set fault address */
569 mfmsr r0 /* Restore "normal" registers */
570 xoris r0,r0,MSR_TGPR>>16
571 mtcrf 0x80,r3 /* Restore CR0 */
576 * Handle TLB miss for DATA Load operation on 603/603e
582 * r1: linux style pte ( later becomes ppc hardware pte )
583 * r2: ptr to linux-style pte
587 /* Get PTE (linux-style) and check access */
589 lis r1,KERNELBASE@h /* check if kernel address */
592 li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
595 lis r2,swapper_pg_dir@ha /* if kernel address, use */
596 addi r2,r2,swapper_pg_dir@l /* kernel page table */
597 mfspr r1,SRR1 /* and MSR_PR bit from SRR1 */
598 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
600 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
601 lwz r2,0(r2) /* get pmd entry */
602 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
603 beq- DataAddressInvalid /* return if no mapping */
605 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
606 lwz r3,0(r2) /* get linux-style pte */
607 andc. r1,r1,r3 /* check access & ~permission */
608 bne- DataAddressInvalid /* return if access not permitted */
609 ori r3,r3,_PAGE_ACCESSED /* set _PAGE_ACCESSED in pte */
611 * NOTE! We are assuming this is not an SMP system, otherwise
612 * we would need to update the pte atomically with lwarx/stwcx.
614 stw r3,0(r2) /* update PTE (accessed bit) */
615 /* Convert linux-style PTE to low word of PPC-style PTE */
616 rlwinm r1,r3,32-10,31,31 /* _PAGE_RW -> PP lsb */
617 rlwinm r2,r3,32-7,31,31 /* _PAGE_DIRTY -> PP lsb */
618 and r1,r1,r2 /* writable if _RW and _DIRTY */
619 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
620 rlwimi r3,r3,32-1,31,31 /* _PAGE_USER -> PP lsb */
621 ori r1,r1,0xe14 /* clear out reserved bits and M */
622 andc r1,r3,r1 /* PP = user? (rw&dirty? 2: 3): 0 */
626 mfspr r3,SRR1 /* Need to restore CR0 */
631 rlwinm r1,r3,9,6,6 /* Get load/store bit */
634 mtctr r0 /* Restore CTR */
635 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
637 mfspr r1,DMISS /* Get failing address */
638 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
639 beq 20f /* Jump if big endian */
641 20: mtspr DAR,r1 /* Set fault address */
642 mfmsr r0 /* Restore "normal" registers */
643 xoris r0,r0,MSR_TGPR>>16
644 mtcrf 0x80,r3 /* Restore CR0 */
649 * Handle TLB miss for DATA Store on 603/603e
655 * r1: linux style pte ( later becomes ppc hardware pte )
656 * r2: ptr to linux-style pte
660 /* Get PTE (linux-style) and check access */
662 lis r1,KERNELBASE@h /* check if kernel address */
665 li r1,_PAGE_RW|_PAGE_USER|_PAGE_PRESENT /* access flags */
668 lis r2,swapper_pg_dir@ha /* if kernel address, use */
669 addi r2,r2,swapper_pg_dir@l /* kernel page table */
670 mfspr r1,SRR1 /* and MSR_PR bit from SRR1 */
671 rlwinm r1,r1,32-12,29,29 /* shift MSR_PR to _PAGE_USER posn */
673 rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
674 lwz r2,0(r2) /* get pmd entry */
675 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
676 beq- DataAddressInvalid /* return if no mapping */
678 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
679 lwz r3,0(r2) /* get linux-style pte */
680 andc. r1,r1,r3 /* check access & ~permission */
681 bne- DataAddressInvalid /* return if access not permitted */
682 ori r3,r3,_PAGE_ACCESSED|_PAGE_DIRTY
684 * NOTE! We are assuming this is not an SMP system, otherwise
685 * we would need to update the pte atomically with lwarx/stwcx.
687 stw r3,0(r2) /* update PTE (accessed/dirty bits) */
688 /* Convert linux-style PTE to low word of PPC-style PTE */
689 rlwimi r3,r3,32-1,30,30 /* _PAGE_USER -> PP msb */
690 li r1,0xe15 /* clear out reserved bits and M */
691 andc r1,r3,r1 /* PP = user? 2: 0 */
695 mfspr r3,SRR1 /* Need to restore CR0 */
/*
 * NOTE(review): sampled excerpt -- remaining trap vectors
 * (0x1300..0x2f00), the AltiVec-unavailable and PPC64BRIDGE
 * continuation stubs, and transfer_to_handler: finish saving state to
 * the exception frame, switch to the task's kernel stack, detect
 * kernel-stack overflow (stack_ovf) and 6xx power-save wakeup, then
 * enter the C handler via rfi with address translation enabled.
 */
699 STD_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint)
700 STD_EXCEPTION(0x1400, SMI, SMIException)
701 STD_EXCEPTION(0x1500, Trap_15, UnknownException)
702 STD_EXCEPTION(0x1600, Trap_16, UnknownException)
703 STD_EXCEPTION(0x1700, Trap_17, TAUException)
704 STD_EXCEPTION(0x1800, Trap_18, UnknownException)
705 STD_EXCEPTION(0x1900, Trap_19, UnknownException)
706 STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
707 STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
708 STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
709 STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
710 STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
711 STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
712 STD_EXCEPTION(0x2000, RunMode, RunModeException)
713 STD_EXCEPTION(0x2100, Trap_21, UnknownException)
714 STD_EXCEPTION(0x2200, Trap_22, UnknownException)
715 STD_EXCEPTION(0x2300, Trap_23, UnknownException)
716 STD_EXCEPTION(0x2400, Trap_24, UnknownException)
717 STD_EXCEPTION(0x2500, Trap_25, UnknownException)
718 STD_EXCEPTION(0x2600, Trap_26, UnknownException)
719 STD_EXCEPTION(0x2700, Trap_27, UnknownException)
720 STD_EXCEPTION(0x2800, Trap_28, UnknownException)
721 STD_EXCEPTION(0x2900, Trap_29, UnknownException)
722 STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
723 STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
724 STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
725 STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
726 STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
727 STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)
731 #ifdef CONFIG_ALTIVEC
734 bne load_up_altivec /* if from user, just load it up */
736 bl transfer_to_handler /* if from kernel, take a trap */
738 .long ret_from_except
739 #endif /* CONFIG_ALTIVEC */
741 #ifdef CONFIG_PPC64BRIDGE
747 b InstructionAccessCont
753 b InstructionSegmentCont
754 #endif /* CONFIG_PPC64BRIDGE */
757 * This code finishes saving the registers to the exception frame
758 * and jumps to the appropriate handler for the exception, turning
759 * on address translation.
761 .globl transfer_to_handler
769 mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */
770 addi r2,r23,-THREAD /* set r2 to current */
772 addi r24,r1,STACK_FRAME_OVERHEAD
774 #ifdef CONFIG_ALTIVEC
776 mfspr r22,SPRN_VRSAVE /* if G4, save vrsave register value */
777 stw r22,THREAD_VRSAVE(r23)
778 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
779 #endif /* CONFIG_ALTIVEC */
780 .globl transfer_to_handler_cont
781 transfer_to_handler_cont:
784 andi. r24,r23,0x3f00 /* get vector offset */
788 mtspr SPRG2,r22 /* r1 is now kernel sp */
789 addi r24,r2,TASK_STRUCT_SIZE /* check for kernel stack overflow */
793 bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */
794 lwz r24,0(r23) /* virtual address of handler */
795 lwz r23,4(r23) /* where to go when done */
801 RFI /* jump to handler, enable MMU */
803 /* Out of line case when returning to kernel,
804 * check return from power_save_6xx
811 bt- 8,power_save_6xx_restore /* Check DOZE */
812 END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
814 bt- 9,power_save_6xx_restore /* Check NAP */
815 END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
816 b transfer_to_handler_cont
818 #endif /* CONFIG_6xx */
821 * On kernel stack overflow, load up an initial stack pointer
822 * and call StackOverflow(regs), which should not return.
825 addi r3,r1,STACK_FRAME_OVERHEAD
826 lis r1,init_task_union@ha
827 addi r1,r1,init_task_union@l
828 addi r1,r1,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
829 lis r24,StackOverflow@ha
830 addi r24,r24,StackOverflow@l
/*
 * NOTE(review): sampled excerpt -- lazy FPU/AltiVec context switching.
 * load_up_fpu and load_up_altivec hand the unit to the current task,
 * first saving the previous owner's state and clearing the unit bit in
 * its saved MSR (UP lazy-switch only; on SMP the state was already
 * surrendered in switch_to via giveup_*).  The KernelFP / kernel
 * AltiVec paths print a warning when the unit is touched from kernel
 * mode but still enable it until return to user mode.  giveup_altivec
 * and giveup_fpu save a given task's unit state into its thread_struct
 * and disable the unit in its saved MSR.
 */
839 * This task wants to use the FPU now.
840 * On UP, disable FP for the task which had the FPU previously,
841 * and save its floating-point registers in its thread_struct.
842 * Load up this task's FP registers from its thread_struct,
843 * enable the FPU for the current task and return to the task.
848 #ifdef CONFIG_PPC64BRIDGE
849 clrldi r5,r5,1 /* turn off 64-bit mode */
850 #endif /* CONFIG_PPC64BRIDGE */
852 MTMSRD(r5) /* enable use of fpu now */
855 * For SMP, we don't do lazy FPU switching because it just gets too
856 * horrendously complex, especially when a task switches from one CPU
857 * to another. Instead we call giveup_fpu in switch_to.
860 tophys(r6,0) /* get __pa constant */
861 addis r3,r6,last_task_used_math@ha
862 lwz r4,last_task_used_math@l(r3)
866 addi r4,r4,THREAD /* want last_task_used_math->thread */
869 stfd fr0,THREAD_FPSCR-4(r4)
872 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
873 li r20,MSR_FP|MSR_FE0|MSR_FE1
874 andc r4,r4,r20 /* disable FP for previous task */
875 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
877 #endif /* CONFIG_SMP */
878 /* enable use of FP after return */
879 mfspr r5,SPRG3 /* current task's THREAD (phys) */
880 lwz r4,THREAD_FPEXC_MODE(r5)
881 ori r23,r23,MSR_FP /* enable FP for current */
883 lfd fr0,THREAD_FPSCR-4(r5)
889 stw r4,last_task_used_math@l(r3)
890 #endif /* CONFIG_SMP */
891 /* restore registers and return */
898 /* we haven't used ctr or xer */
908 * FP unavailable trap from kernel - print a message, but let
909 * the task use FP in the kernel until it returns to user mode.
914 stw r3,_MSR(r1) /* enable use of FP after return */
917 mr r4,r2 /* current */
921 86: .string "floating point used in kernel (task=%p, pc=%x)\n"
924 #ifdef CONFIG_ALTIVEC
925 /* Note that the AltiVec support is closely modeled after the FP
926 * support. Changes to one are likely to be applicable to the
930 * Disable AltiVec for the task which had AltiVec previously,
931 * and save its AltiVec registers in its thread_struct.
932 * Enables AltiVec for use in the kernel on return.
933 * On SMP we know the AltiVec units are free, since we give it up every
938 mtmsr r5 /* enable use of AltiVec now */
941 * For SMP, we don't do lazy AltiVec switching because it just gets too
942 * horrendously complex, especially when a task switches from one CPU
943 * to another. Instead we call giveup_altivec in switch_to.
952 addis r3,r6,last_task_used_altivec@ha
953 lwz r4,last_task_used_altivec@l(r3)
957 addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
964 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
966 andc r4,r4,r20 /* disable altivec for previous task */
967 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
969 #endif /* CONFIG_SMP */
970 /* enable use of AltiVec after return */
971 oris r23,r23,MSR_VEC@h
972 mfspr r5,SPRG3 /* current task's THREAD (phys) */
980 stw r4,last_task_used_altivec@l(r3)
981 #endif /* CONFIG_SMP */
982 /* restore registers and return */
989 /* we haven't used ctr or xer */
999 * AltiVec unavailable trap from kernel - print a message, but let
1000 * the task use AltiVec in the kernel until it returns to user mode.
1004 oris r3,r3,MSR_VEC@h
1005 stw r3,_MSR(r1) /* enable use of AltiVec after return */
1008 mr r4,r2 /* current */
1012 87: .string "AltiVec used in kernel (task=%p, pc=%x) \n"
1016 * giveup_altivec(tsk)
1017 * Disable AltiVec for the task given as the argument,
1018 * and save the AltiVec registers in its thread_struct.
1019 * Enables AltiVec for use in the kernel on return.
1022 .globl giveup_altivec
1025 oris r5,r5,MSR_VEC@h
1027 mtmsr r5 /* enable use of AltiVec now */
1030 beqlr- /* if no previous owner, done */
1031 addi r3,r3,THREAD /* want THREAD of task */
1034 SAVE_32VR(0, r4, r3)
1039 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1041 andc r4,r4,r3 /* disable AltiVec for previous task */
1042 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1046 lis r4,last_task_used_altivec@ha
1047 stw r5,last_task_used_altivec@l(r4)
1048 #endif /* CONFIG_SMP */
1050 #endif /* CONFIG_ALTIVEC */
1054 * Disable FP for the task given as the argument,
1055 * and save the floating-point registers in its thread_struct.
1056 * Enables the FPU for use in the kernel on return.
1064 mtmsr r5 /* enable use of fpu now */
1068 beqlr- /* if no previous owner, done */
1069 addi r3,r3,THREAD /* want THREAD of task */
1074 stfd fr0,THREAD_FPSCR-4(r3)
1076 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1077 li r3,MSR_FP|MSR_FE0|MSR_FE1
1078 andc r4,r4,r3 /* disable FP for previous task */
1079 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1083 lis r4,last_task_used_math@ha
1084 stw r5,last_task_used_math@l(r4)
1085 #endif /* CONFIG_SMP */
/*
 * NOTE(review): sampled excerpt -- three areas: (1) kernel relocation:
 * copy the image to physical address 0 with copy_and_flush, which also
 * dcbst/icbi-flushes each cache line so the copied code is coherent;
 * the code copies the first 0x4000 bytes, jumps into the copy, then
 * copies the rest up to klimit.  (2) APUS fixup: the __vtop/__ptov
 * tables record the addresses of instructions whose immediates encode
 * the __pa/__va constants; each is patched in place (rlwimi into the
 * low half of the instruction word) and flushed.  (3) SMP secondary-CPU
 * entry points (gemini, powersurge CPUs 1-3, generic __secondary_start):
 * pick a cpu number in r24, set up stack/MMU/SPRGs, rfi to
 * start_secondary.
 */
1089 * This code is jumped to from the startup code to copy
1090 * the kernel image to physical address 0.
1093 addis r9,r26,klimit@ha /* fetch klimit */
1094 lwz r25,klimit@l(r9)
1095 addis r25,r25,-KERNELBASE@h
1096 li r3,0 /* Destination base address */
1097 li r6,0 /* Destination offset */
1098 li r5,0x4000 /* # bytes of memory to copy */
1099 bl copy_and_flush /* copy the first 0x4000 bytes */
1100 addi r0,r3,4f@l /* jump to the address of 4f */
1101 mtctr r0 /* in copy and do the rest. */
1102 bctr /* jump to the copy */
1104 bl copy_and_flush /* copy the rest */
1108 * Copy routine used to copy the kernel to start at physical address 0
1109 * and flush and invalidate the caches as needed.
1110 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
1111 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
1116 4: li r0,L1_CACHE_LINE_SIZE/4
1118 3: addi r6,r6,4 /* copy a cache line */
1122 dcbst r6,r3 /* write it to memory */
1124 icbi r6,r3 /* flush the icache line */
1127 sync /* additional sync needed on g4 */
1135 * On APUS the physical base address of the kernel is not known at compile
1136 * time, which means the __pa/__va constants used are incorrect. In the
1137 * __init section is recorded the virtual addresses of instructions using
1138 * these constants, so all that has to be done is fix these before
1139 * continuing the kernel boot.
1141 * r4 = The physical address of the kernel base.
1145 addis r10,r10,-KERNELBASE@h /* virt_to_phys constant */
1146 neg r11,r10 /* phys_to_virt constant */
1148 lis r12,__vtop_table_begin@h
1149 ori r12,r12,__vtop_table_begin@l
1150 add r12,r12,r10 /* table begin phys address */
1151 lis r13,__vtop_table_end@h
1152 ori r13,r13,__vtop_table_end@l
1153 add r13,r13,r10 /* table end phys address */
1156 1: lwzu r14,4(r12) /* virt address of instruction */
1157 add r14,r14,r10 /* phys address of instruction */
1158 lwz r15,0(r14) /* instruction, now insert top */
1159 rlwimi r15,r10,16,16,31 /* half of vp const in low half */
1160 stw r15,0(r14) /* of instruction and restore. */
1161 dcbst r0,r14 /* write it to memory */
1163 icbi r0,r14 /* flush the icache line */
1166 sync /* additional sync needed on g4 */
1170 * Map the memory where the exception handlers will
1171 * be copied to when hash constants have been patched.
1173 #ifdef CONFIG_APUS_FAST_EXCEPT
1178 ori r8,r8,0x2 /* 128KB, supervisor */
1182 lis r12,__ptov_table_begin@h
1183 ori r12,r12,__ptov_table_begin@l
1184 add r12,r12,r10 /* table begin phys address */
1185 lis r13,__ptov_table_end@h
1186 ori r13,r13,__ptov_table_end@l
1187 add r13,r13,r10 /* table end phys address */
1190 1: lwzu r14,4(r12) /* virt address of instruction */
1191 add r14,r14,r10 /* phys address of instruction */
1192 lwz r15,0(r14) /* instruction, now insert top */
1193 rlwimi r15,r11,16,16,31 /* half of pv const in low half*/
1194 stw r15,0(r14) /* of instruction and restore. */
1195 dcbst r0,r14 /* write it to memory */
1197 icbi r0,r14 /* flush the icache line */
1201 sync /* additional sync needed on g4 */
1202 isync /* No speculative loading until now */
1205 /***********************************************************************
1206 * Please note that on APUS the exception handlers are located at the
1207 * physical address 0xfff0000. For this reason, the exception handlers
1208 * cannot use relative branches to access the code below.
1209 ***********************************************************************/
1210 #endif /* CONFIG_APUS */
1213 #ifdef CONFIG_GEMINI
1214 .globl __secondary_start_gemini
1215 __secondary_start_gemini:
1225 #endif /* CONFIG_GEMINI */
1226 .globl __secondary_start_psurge
1227 __secondary_start_psurge:
1228 li r24,1 /* cpu # */
1229 b __secondary_start_psurge99
1230 .globl __secondary_start_psurge2
1231 __secondary_start_psurge2:
1232 li r24,2 /* cpu # */
1233 b __secondary_start_psurge99
1234 .globl __secondary_start_psurge3
1235 __secondary_start_psurge3:
1236 li r24,3 /* cpu # */
1237 b __secondary_start_psurge99
1238 __secondary_start_psurge99:
1239 /* we come in here with IR=0 and DR=1, and DBAT 0
1240 set to map the 0xf0000000 - 0xffffffff region */
1242 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
1247 .globl __secondary_start
1249 #ifdef CONFIG_PPC64BRIDGE
1251 clrldi r0,r0,1 /* make sure it's in 32-bit mode */
1257 lis r3,-KERNELBASE@h
1260 bl call_setup_cpu /* Call setup_cpu for this CPU */
1262 lis r3,-KERNELBASE@h
1264 #endif /* CONFIG_6xx */
1267 lis r2,current_set@h
1268 ori r2,r2,current_set@l
1270 slwi r24,r24,2 /* get current_set[cpu#] */
1274 addi r1,r2,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
1279 /* load up the MMU */
1282 /* ptr to phys current thread */
1284 addi r4,r4,THREAD /* phys address of our thread_struct */
1288 mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
1290 /* enable MMU and jump to start_secondary */
1292 lis r3,start_secondary@h
1293 ori r3,r3,start_secondary@l
1298 #endif /* CONFIG_SMP */
1301 * Enable caches and 604-specific features if necessary.
1303 _GLOBAL(__setup_cpu_601)
1305 _GLOBAL(__setup_cpu_603)
1306 b setup_common_caches
1307 _GLOBAL(__setup_cpu_604)
1309 bl setup_common_caches
1313 _GLOBAL(__setup_cpu_750)
1315 bl setup_common_caches
1316 bl setup_750_7400_hid0
1319 _GLOBAL(__setup_cpu_750cx)
1321 bl setup_common_caches
1322 bl setup_750_7400_hid0
1326 _GLOBAL(__setup_cpu_750fx)
1328 bl setup_common_caches
1329 bl setup_750_7400_hid0
1333 _GLOBAL(__setup_cpu_7400)
1335 bl setup_7400_workarounds
1336 bl setup_common_caches
1337 bl setup_750_7400_hid0
1340 _GLOBAL(__setup_cpu_7410)
1342 bl setup_7410_workarounds
1343 bl setup_common_caches
1344 bl setup_750_7400_hid0
1349 _GLOBAL(__setup_cpu_7450)
1351 bl setup_common_caches
1352 bl setup_745x_specifics
1355 _GLOBAL(__setup_cpu_7455)
1357 bl setup_common_caches
1358 bl setup_745x_specifics
1361 _GLOBAL(__setup_cpu_power3)
1363 _GLOBAL(__setup_cpu_generic)
1366 /* Enable caches for 603's, 604, 750 & 7400 */
1367 setup_common_caches:
1369 andi. r0,r11,HID0_DCE
1370 #ifdef CONFIG_DCACHE_DISABLE
1371 ori r11,r11,HID0_ICE
1373 ori r11,r11,HID0_ICE|HID0_DCE
1375 ori r8,r11,HID0_ICFI
1376 bne 1f /* don't invalidate the D-cache */
1377 ori r8,r8,HID0_DCI /* unless it wasn't enabled */
1379 mtspr HID0,r8 /* enable and invalidate caches */
1381 mtspr HID0,r11 /* enable caches */
1386 /* 604, 604e, 604ev, ...
1387 * Enable superscalar execution & branch history table
1391 ori r11,r11,HID0_SIED|HID0_BHTE
1392 ori r8,r11,HID0_BTCD
1394 mtspr HID0,r8 /* flush branch target address cache */
1395 sync /* on 604e/604r */
1401 /* 7400 <= rev 2.7 and 7410 rev = 1.0 suffer from some
1402 * erratas we work around here.
1403 * Moto MPC710CE.pdf describes them, those are errata
1405 * Note that we assume the firmware didn't choose to
1406 * apply other workarounds (there are other ones documented
1407 * in the .pdf). It appear that Apple firmware only works
1408 * around #3 and with the same fix we use. We may want to
1409 * check if the CPU is using 60x bus mode in which case
1410 * the workaround for errata #4 is useless. Also, we may
1411 * want to explicitely clear HID0_NOPDST as this is not
1412 * needed once we have applied workaround #5 (though it's
1413 * not set by Apple's firmware at least).
1415 setup_7400_workarounds:
/* NOTE(review): r3 is presumably the PVR here (mfspr not visible in
 * this excerpt); keep only the low 12 bits = processor revision. */
1417 rlwinm r3,r3,0,20,31
1421 setup_7410_workarounds:
1423 rlwinm r3,r3,0,20,31 /* same revision mask for the 7410 entry */
/* Common tail: patch the memory-subsystem status/control register. */
1427 mfspr r11,SPRN_MSSSR0
1428 /* Errata #3: Set L1OPQ_SIZE to 0x10 */
1429 rlwinm r11,r11,0,9,6 /* clear the field (bits 7-8) before setting it */
1431 /* Errata #4: Set L2MQ_SIZE to 1 (check for MPX mode first ?) */
1433 /* Errata #5: Set DRLT_SIZE to 0x01 */
1434 rlwinm r11,r11,0,5,2 /* clear the field (bits 3-4) before setting it */
1437 mtspr SPRN_MSSSR0,r11
1442 /* 740/750/7400/7410
1443 * Enable Store Gathering (SGE), Address Brodcast (ABE),
1444 * Branch History Table (BHTE), Branch Target ICache (BTIC)
1445 * Dynamic Power Management (DPM), Speculative (SPD)
1446 * Clear Instruction cache throttling (ICTC)
1448 setup_750_7400_hid0:
/* NOTE(review): the mfspr loading HID0 into r11 and the final mtspr
 * writing it back are not visible in this excerpt. */
1450 ori r11,r11,HID0_SGE | HID0_ABE | HID0_BHTE | HID0_BTIC
/* DPM is only enabled on CPUs without the NO_DPM feature flag
 * (feature-fixup section; the BEGIN_FTR_SECTION line is not shown). */
1452 oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
1453 END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
1455 andc r11,r11,r3 /* clear SPD: enable speculative */
1457 mtspr ICTC,r3 /* Instruction Cache Throttling off */
1465 * Looks like we have to disable NAP feature for some PLL settings...
1466 * (waiting for confirmation)
1469 mfspr r10, SPRN_HID1
1470 rlwinm r10,r10,4,28,31 /* isolate HID1's top nibble (PLL config) in r10 */
/* Fold the (not-visible) PLL-value comparisons together: cr0.eq ends
 * up set if any of the checked PLL settings matched. */
1474 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
1475 cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
/* Strip CPU_FTR_CAN_NAP from the cpu_spec feature word (r5 presumably
 * points at the cpu_spec entry — the andc between load and store is
 * not visible in this excerpt). */
1477 lwz r6,CPU_SPEC_FEATURES(r5)
1478 li r7,CPU_FTR_CAN_NAP
1480 stw r6,CPU_SPEC_FEATURES(r5)
1489 * Enable Store Gathering (SGE), Branch Folding (FOLD)
1490 * Branch History Table (BHTE), Branch Target ICache (BTIC)
1491 * Dynamic Power Management (DPM), Speculative (SPD)
1492 * Ensure our data cache instructions really operate.
1493 * Timebase has to be running or we wouldn't have made it here,
1494 * just ensure we don't disable it.
1495 * Clear Instruction cache throttling (ICTC)
1496 * Enable L2 HW prefetch
1498 setup_745x_specifics:
1499 /* We check for the presence of an L3 cache setup by
1500 * the firmware. If any, we disable NAP capability as
1501 * it's known to be bogus on rev 2.1 and earlier
/* NOTE(review): the mfspr loading L3CR into r11 is not visible here. */
1504 andis. r11,r11,L3CR_L3E@h /* nonzero iff firmware enabled the L3 */
/* If the CPU has the L3-disables-NAP quirk, clear CAN_NAP in the
 * cpu_spec feature word (the conditional branches and the andc between
 * load and store are not visible in this excerpt). */
1506 lwz r6,CPU_SPEC_FEATURES(r5)
1507 andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
1509 li r7,CPU_FTR_CAN_NAP
1511 stw r6,CPU_SPEC_FEATURES(r5)
1515 /* All of the bits we have to set.....
1517 ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE | HID0_BTIC | HID0_LRSTK
/* DPM only for CPUs without the NO_DPM feature flag (feature-fixup
 * section; the BEGIN_FTR_SECTION line is not shown). */
1519 oris r11,r11,HID0_DPM@h /* enable dynamic power mgmt */
1520 END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
1522 /* All of the bits we have to clear....
1524 li r3,HID0_SPD | HID0_NOPDST | HID0_NOPTI
1525 andc r11,r11,r3 /* clear SPD: enable speculative */
1528 mtspr ICTC,r3 /* Instruction Cache Throttling off */
1534 /* Enable L2 HW prefetch
/* NOTE(review): the ori that actually sets the prefetch enable bit(s)
 * between this read-modify-write pair is not visible in this excerpt. */
1536 mfspr r3,SPRN_MSSCR0
1539 mtspr SPRN_MSSCR0,r3
1545 * Load stuff into the MMU. Intended to be called with
/* NOTE(review): the routine label and the SDR1 mtspr itself are not
 * visible in this excerpt — confirm against the full source. */
1549 /* Load the SDR1 register (hash table base & size) */
1554 #ifdef CONFIG_PPC64BRIDGE
1555 /* clear the ASR so we only use the pseudo-segment registers. */
1558 #endif /* CONFIG_PPC64BRIDGE */
/* Initialize all 16 segment registers for kernel context 0.
 * The loop's mtsrin/bdnz instructions are not visible here; the
 * visible lines set up the counter and per-iteration increments. */
1559 li r0,16 /* load up segment register values */
1560 mtctr r0 /* for context 0 */
1561 lis r3,0x2000 /* Ku = 1, VSID = 0 */
1564 addi r3,r3,0x111 /* increment VSID */
1565 addis r4,r4,0x1000 /* address of next segment */
1567 /* Load the BAT registers with the values set up by MMU_init.
1568 MMU_init takes care of whether we're on a 601 or not. */
1575 LOAD_BAT(0,r3,r4,r5)
1576 LOAD_BAT(1,r3,r4,r5)
1577 LOAD_BAT(2,r3,r4,r5)
1578 LOAD_BAT(3,r3,r4,r5)
1582 * This is where the main kernel code starts.
1585 /* ptr to current */
/* r2 = &init_task_union: "current" for the boot CPU until the first
 * real task switch. */
1586 lis r2,init_task_union@h
1587 ori r2,r2,init_task_union@l
1588 /* Set up for using our exception vectors */
1589 /* ptr to phys current thread */
1591 addi r4,r4,THREAD /* init task's THREAD */
1595 mtspr SPRG2,r3 /* 0 => r1 has kernel sp */
/* Point r1 at the top of the init task's stack and push an initial
 * (zeroed — NOTE(review): the li r0,0 is not visible here) frame. */
1598 addi r1,r2,TASK_UNION_SIZE
1600 stwu r0,-STACK_FRAME_OVERHEAD(r1)
1602 * Do early bootinfo parsing, platform-specific initialization,
1603 * and set up the MMU.
1614 /* Copy exception code to exception vector base on APUS. */
1616 #ifdef CONFIG_APUS_FAST_EXCEPT
1617 lis r3,0xfff0 /* Copy to 0xfff00000 */
1619 lis r3,0 /* Copy to 0x00000000 */
1621 li r5,0x4000 /* # bytes of memory to copy */
1623 bl copy_and_flush /* copy the first 0x4000 bytes */
1624 #endif /* CONFIG_APUS */
1627 * Go back to running unmapped so we can load up new values
1628 * for SDR1 (hash table pointer) and the segment registers
1629 * and change to using our exception vectors.
/* MSR with translation (IR/DR) off for the rfi-style transition;
 * the srr0/srr1 setup and rfi are not visible in this excerpt. */
1634 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1640 /* Load up the kernel context */
1642 sync /* Force all PTE updates to finish */
1644 tlbia /* Clear all TLB entries */
1645 sync /* wait for tlbia/tlbie to finish */
1646 TLBSYNC /* ... on all CPUs */
1650 #ifdef CONFIG_BDI_SWITCH
1651 /* Add helper information for the Abatron bdiGDB debugger.
1652 * We do this here because we know the mmu is disabled, and
1653 * will be enabled for real in just a few instructions.
1655 lis r5, abatron_pteptrs@h
1656 ori r5, r5, abatron_pteptrs@l
1657 stw r5, 0xf0(r0) /* This much match your Abatron config */
1658 lis r6, swapper_pg_dir@h
1659 ori r6, r6, swapper_pg_dir@l
1664 /* Now turn on the MMU for real! */
/* Resume at start_kernel with translation enabled (the mtspr of
 * SRR0/SRR1 and the rfi are not visible in this excerpt). */
1667 lis r3,start_kernel@h
1668 ori r3,r3,start_kernel@l
1675 * Set up the segment registers for a new context.
/* In: r3 = context number; r4 (per the comment below) = PGDIR.
 * Computes the base VSID for the context and reloads the user
 * segment registers. NOTE(review): the mtsrin loop body, dssall
 * feature section and final sync/blr are not visible in this excerpt. */
1677 _GLOBAL(set_context)
1678 mulli r3,r3,897 /* multiply context by skew factor */
1679 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
1680 addis r3,r3,0x6000 /* Set Ks, Ku bits */
1681 li r0,NUM_USER_SEGMENTS
1684 #ifdef CONFIG_BDI_SWITCH
1685 /* Context switch the PTE pointer for the Abatron BDI2000.
1686 * The PGDIR is passed as second argument.
1688 lis r5, KERNELBASE@h
1697 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1699 #ifdef CONFIG_PPC64BRIDGE
1701 #endif /* CONFIG_PPC64BRIDGE */
/* Per-segment loop increments (the mtsrin/bdnz are not shown). */
1703 addi r3,r3,0x111 /* next VSID */
1704 rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
1705 addis r4,r4,0x1000 /* address of next segment */
1712 * An undocumented "feature" of 604e requires that the v bit
1713 * be cleared before changing BAT values.
1715 * Also, newer IBM firmware does not clear bat3 and 4 so
1716 * this makes sure it's done.
/* NOTE(review): only scattered lines of clear_bats / flush_tlbs /
 * mmu_off survive in this excerpt; comments below are hedged. */
1722 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
/* Presumably the flush_tlbs loop: step an address down a page at a
 * time (tlbie not visible) until it reaches zero — confirm. */
1747 1: addic. r20, r20, -0x1000
/* Presumably mmu_off: compute the physical return address relative to
 * _start, and skip the switch-off if translation is already off. */
1754 addi r4, r3, __after_mmu_off - _start
1756 andi. r0,r3,MSR_DR|MSR_IR /* MMU enabled? */
1765 * Use the first pair of BAT registers to map the 1st 16MB
1766 * of RAM to KERNELBASE. From this point on we can't safely
1770 lis r11,KERNELBASE@h /* BAT upper half: virtual base of the mapping */
1771 #ifndef CONFIG_PPC64BRIDGE
/* NOTE(review): the mfspr of PVR feeding r9 is not visible here. */
1773 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
/* 601 path: its BAT format differs (valid bit lives in the lower
 * register) and blocks max out at 8MB, so two pairs cover 16MB. */
1776 ori r11,r11,4 /* set up BAT registers for 601 */
1777 li r8,0x7f /* valid, block length = 8MB */
1778 oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
1779 oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
1780 mtspr IBAT0U,r11 /* N.B. 601 has valid bit in */
1781 mtspr IBAT0L,r8 /* lower BAT register */
1786 #endif /* CONFIG_PPC64BRIDGE */
/* Non-601 path: choose WIMG/PP bits — memory-coherent (M=1) on SMP. */
1790 ori r8,r8,0x12 /* R/W access, M=1 */
1792 ori r8,r8,2 /* R/W access */
1793 #endif /* CONFIG_SMP */
1795 ori r11,r11,BL_8M<<2|0x2 /* set up 8MB BAT registers for 604 */
1797 ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
1798 #endif /* CONFIG_APUS */
1800 #ifdef CONFIG_PPC64BRIDGE
1801 /* clear out the high 32 bits in the BAT */
1804 #endif /* CONFIG_PPC64BRIDGE */
1805 mtspr DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
1806 mtspr DBAT0U,r11 /* bit in upper BAT register */
1812 #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT)
1815 * setup the display bat prepared for us in prom.c
/* Load the address of the disp_BAT value pair saved by prom.c;
 * r3 presumably holds the physical offset (set up by the caller —
 * not visible in this excerpt). */
1820 addis r8,r3,disp_BAT@ha
1821 addi r8,r8,disp_BAT@l
1825 rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */
1835 #endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */
1838 /* Jump into the system reset for the rom.
1839 * We first disable the MMU, and then jump to the ROM reset address.
1841 * r3 is the board info structure, r4 is the location for starting.
1842 * I use this for building a small kernel that can load other kernels,
1843 * rather than trying to write or rely on a rom monitor that can tftp load.
/* NOTE(review): only fragments of this routine are visible in this
 * excerpt; the label, MSR writes and the final jump are missing. */
1848 rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
1854 ori r10,r10,HID0_ICE|HID0_DCE /* cache enable bits (invalidate mask?) — confirm */
1860 addis r6,r6,-KERNELBASE@h /* convert a kernel virtual address to physical */
1874 * We put a few things here that have to be page-aligned.
1875 * This stuff goes at the beginning of the data segment,
1876 * which is page-aligned.
/* A page of zeroes exported to C code (storage directive not shown). */
1881 .globl empty_zero_page
/* The kernel's initial page directory (storage directive not shown). */
1885 .globl swapper_pg_dir
1890 * This space gets a copy of optional info passed to us by the bootstrap
1891 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
/* Per-exception-vector intercept hooks, indexed by vector>>8; zero
 * entries mean "no intercept". Names i0x200... refer to handlers
 * defined elsewhere in this file. */
1897 .globl intercept_table
1899 .long 0, 0, i0x200, i0x300, i0x400, 0, i0x600, i0x700
1900 .long i0x800, 0, 0, 0, 0, i0xd00, 0, 0
1901 .long 0, 0, 0, i0x1300, 0, 0, 0, 0
1902 .long 0, 0, 0, 0, 0, 0, 0, 0
1903 .long 0, 0, 0, 0, 0, 0, 0, 0
1904 .long 0, 0, 0, 0, 0, 0, 0, 0
1906 #ifdef CONFIG_BDI_SWITCH
1907 /* Room for two PTE pointers, usually the kernel and current user pointers
1908 * to their respective root page table.