2 * arch/ppc/kernel/iSeries_head.S
4 * Adapted from arch/ppc/kernel/head.S
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
9 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
10 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
11 * Adapted for Power Macintosh by Paul Mackerras.
12 * Low-level exception handlers and MMU support
13 * rewritten by Paul Mackerras.
14 * Copyright (C) 1996 Paul Mackerras.
15 * Adapted for iSeries by Mike Corrigan
16 * Updated by Dave Boutcher
18 * This file contains the low-level support and setup for the
19 * iSeries LPAR platform.
21 * This program is free software; you can redistribute it and/or
22 * modify it under the terms of the GNU General Public License
23 * as published by the Free Software Foundation; either version
24 * 2 of the License, or (at your option) any later version.
28 #include <linux/config.h>
29 #include <asm/processor.h>
32 #include <asm/pgtable.h>
33 #include <asm/ppc_asm.h>
35 #include "iSeries_asm.h"
44 * In an iSeries partition, the operating system has no direct access
45 * to the hashed page table. The iSeries hypervisor manages the
46 * hashed page table, and is directed by the operating system in the
47 * partition. The partition, Linux in this case, always runs with
48 * MSR.IR and MSR.DR equal to 1. The hypervisor establishes
49 * addressability for the first 64 MB of memory at 0xC0000000 by
50 * building a hashed page table and setting segment register 12.
52 * The partition memory is not physically contiguous, nor necessarily
53 * addressable with a 32-bit address. The hypervisor provides functions
54 * which the kernel can use to discover the layout of memory. The
55 * iSeries LPAR specific code in the kernel will build a table that maps
56 * contiguous pseudo-real addresses starting at zero to the actual
57 * physical addresses owned by this partition. In 32-bit mode we will
58 * restrict ourselves to no more than 768 MB (or maybe 1 GB)
60 * When Linux interrupt handlers get control, the hypervisor has
61 * already saved SRR0 and SRR1 into a control block shared between
62 * the hypervisor and Linux. This is known as the ItLpPaca. The values
63 * in the actual SRR0 and SRR1 are not valid. This requires a change in
64 * the way the SPRG registers are used. The definitions are:
66 * Register old definition new definition
68 * SPRG0 temp - used to save gpr reserved for hypervisor
69 * SPRG1 temp - used to save gpr addr of Paca
70 * SPRG2 0 or kernel stack frame temp - used to save gpr
71 * SPRG3 Linux thread Linux thread
73 * The Paca contains the address of the ItLpPaca. The Paca is known only
74 * to Linux, while the ItLpPaca is shared between Linux and the
77 * The value that used to be in SPRG2 will now be saved in the Paca,
78 * as will at least one GPR.
88 /* iSeries LPAR hypervisor expects a 64-bit offset of
89 the hvReleaseData structure (see HvReleaseData.h)
90 at offset 0x20. This is the base for all common
91 control blocks between the hypervisor and the kernel
/*
 * NOTE(review): this extract carries the original file's line numbers
 * fused onto each line; code below is kept byte-identical.
 *
 * Fixed-location words shared with the iSeries hypervisor (the header
 * comment above says hvReleaseData must be findable at offset 0x20).
 * Offsets are physical, i.e. relative to KERNELBASE.
 */
95 .long hvReleaseData-KERNELBASE
97 .long msChunks-KERNELBASE
99 .long pidhash-KERNELBASE
100 /* Pointer to start of embedded System.map */
102 .globl embedded_sysmap_start
103 embedded_sysmap_start:
105 /* Pointer to end of embedded System.map */
107 .globl embedded_sysmap_end
/* Statistics counters. ste_fault_count is incremented in ste_fault
 * below; the others are presumably bumped by their namesake paths --
 * TODO(review): confirm against the full file. */
114 .globl ste_fault_count
117 .globl set_context_count
123 .globl update_times_count
126 .globl update_wall_jiffies_count
127 update_wall_jiffies_count:
129 .globl update_wall_jiffies_ticks
130 update_wall_jiffies_ticks:
135 * We assume SPRG1 has the address of the Paca and SPRG3
136 * has the address of the task's thread_struct.
137 * SPRG2 is used as a scratch register (as required by the
138 * hypervisor). SPRG0 is reserved for the hypervisor.
140 * The ItLpPaca has the values of SRR0 and SRR1 that the
141 * hypervisor saved at the point of the actual interrupt.
143 * The Paca contains the value that the non-LPAR PPC Linux Kernel
144 * keeps in SPRG2, which is either zero (if the interrupt
145 * occurred in the kernel) or the address of the available
146 * space on the kernel stack (if the interrupt occurred
/*
 * EXCEPTION_PROLOG_1: first-stage interrupt entry.
 * Stash r20 in SPRG2, load the Paca pointer from SPRG1 into r20,
 * save r21/r22 into the Paca (std is safe: soft-disable protects
 * 64-bit register use, per the comment below), capture CR in r22.
 */
149 #define EXCEPTION_PROLOG_1 \
150 mtspr SPRG2,r20; /* use SPRG2 as scratch reg */\
151 mfspr r20,SPRG1; /* get Paca */\
152 /* must do std not stw because soft disable protects \
153 * 64-bit register use (in HvCall, maybe others) \
155 std r21,PACAR21(r20); /* Save GPR21 in Paca */\
156 std r22,PACAR22(r20); /* Save GPR22 in Paca */\
157 mfcr r22 /* Get CR */
/*
 * EXCEPTION_PROLOG_2: choose the exception stack (PACAKSAVE if set,
 * else a new frame below the current r1), start filling the frame,
 * call save_regs for the rest, then fetch SRR0/SRR1 from the ItLpPaca
 * (the real SRR0/SRR1 are not valid under the hypervisor -- see the
 * file header).
 */
159 #define EXCEPTION_PROLOG_2 \
160 lwz r21,PACAKSAVE(r20); /* exception stack to use */\
161 cmpwi 0,r21,0; /* user mode or kernel */\
162 bne 1f; /* 0 -> r1, else use PACAKSAVE */\
163 subi r21,r1,INT_FRAME_SIZE; /* alloc exc. frame */\
164 1: stw r1,GPR1(r21); \
166 stw r22,_CCR(r1); /* save CR in stackframe */ \
168 stw r22,_LINK(r1); /* Save LR in stackframe */ \
169 bl save_regs; /* now save everything else */ \
170 ld r22,PACALPPACA+LPPACASRR0(r20); /* Get SRR0 from ItLpPaca */\
171 ld r23,PACALPPACA+LPPACASRR1(r20) /* Get SRR1 from ItLpPaca */
/*
 * EXCEPTION_PROLOG_EXIT: fast-exit path that undoes PROLOG_1 --
 * reloads SRR0/SRR1 from the ItLpPaca and restores r21/r22 from the
 * Paca. (Some continuation lines are elided in this extract.)
 */
173 #define EXCEPTION_PROLOG_EXIT \
175 ld r22,PACALPPACA+LPPACASRR0(r20); \
176 ld r21,PACALPPACA+LPPACASRR1(r20); \
179 ld r22,PACAR22(r20); \
180 ld r21,PACAR21(r20); \
184 #define EXCEPTION_PROLOG \
185 EXCEPTION_PROLOG_1; \
190 * Note: code which follows this uses cr0.eq (set if from kernel),
191 * r21, r22 (SRR0), and r23 (SRR1).
197 #define STD_EXCEPTION(n, label, hdlr) \
201 addi r3,r1,STACK_FRAME_OVERHEAD; \
202 li r20,0; /* soft disabled */\
203 bl transfer_to_handler; \
205 .long ret_from_except
/*
 * Early entry: SPRG3 holds the Paca address here; copy it to SPRG1,
 * read PACAPACAINDEX, and send processor 0 to start_here while
 * secondaries spin below (SMP) or yield to the hypervisor (non-SMP).
 */
210 mfspr r3,SPRG3 /* Get Paca address */
211 mtspr SPRG1,r3 /* Set Linux SPRG1 -> Paca */
212 lhz r24,PACAPACAINDEX(r3) /* Get processor # */
213 cmpi 0,r24,0 /* Are we processor 0? */
214 beq start_here /* Start up the first processor */
217 andc r4,r4,r5 /* Turn off the run light */
222 lbz r23,PACAPROCSTART(r3) /* Test if this processor
227 /* Let the Hypervisor know we are alive */
228 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
230 rldicr r0,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
231 rldicl r3,r3,0,48 /* r3 = r3 & 0x000000000000ffff */
232 or r3,r3,r0 /* r3 = r3 | r0 */
233 #else /* CONFIG_SMP */
234 /* Yield the processor. This is required for non-SMP kernels
235 which are running on multi-threaded machines. */
237 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
238 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
239 li r4,0 /* "yield timed" */
240 li r5,-1 /* "yield forever" */
241 #endif /* CONFIG_SMP */
242 li r0,-1 /* r0=-1 indicates a Hypervisor call */
243 sc /* Invoke the hypervisor via a system call */
244 mfspr r3,SPRG1 /* Put r3 back */
245 b 1b /* If SMP not configured, secondaries
/*
 * 0x200 machine check, then the 0x300 data-access and 0x400
 * instruction-access paths: segment faults branch to ste_fault;
 * hashable page faults are first tried in hash_page before punting
 * to the generic C handler via transfer_to_handler.
 */
249 STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)
251 /* Data access exception. */
260 andis. r0,r5,0x0020 /* Is this a segment fault? */
261 bne ste_fault /* Yes - go reload segment regs */
263 /* This should and with 0xd7ff */
264 andis. r0,r5,0xa470 /* Can we handle as little fault? */
267 rlwinm r3,r5,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
270 * r3 contains the required access permissions
271 * r4 contains the faulting address
274 stw r22,_NIP(r1) /* Help with debug if dsi loop */
275 bl hash_page /* Try to handle as hpte fault */
276 lwz r4,_DAR(r1) /* Get original DAR */
277 lwz r5,_DSISR(r1) /* and original DSISR */
279 1: addi r3,r1,STACK_FRAME_OVERHEAD
281 bl transfer_to_handler
283 .long ret_from_except
286 /* Instruction access exception. */
293 andis. r0,r23,0x0020 /* Is this a segment fault? */
294 bne ste_fault /* Yes - go reload segment regs */
296 andis. r0,r23,0x4000 /* no pte found? */
297 beq 1f /* if so, try to put a PTE */
300 bl hash_page /* Try to handle as hpte fault */
304 1: addi r3,r1,STACK_FRAME_OVERHEAD
306 bl transfer_to_handler
308 .long ret_from_except
/*
 * 0x500 external interrupt, 0x600 alignment, 0x700 program check,
 * 0x800 FP unavailable, and 0x900 decrementer entries.
 * The external and decrementer paths test PACAPROCENABLED first and
 * take the EXCEPTION_PROLOG_EXIT fast path while soft-disabled; the
 * decrementer also records the missed tick in the ItLpPaca.
 */
310 /* External interrupt */
314 lbz r21,PACAPROCENABLED(r20)
317 EXCEPTION_PROLOG_EXIT
318 1: EXCEPTION_PROLOG_2
320 addi r3,r1,STACK_FRAME_OVERHEAD
322 li r20,0 /* Soft disabled */
323 bl transfer_to_handler
324 .globl do_IRQ_intercept
327 .long ret_from_intercept
329 /* Alignment exception */
337 addi r3,r1,STACK_FRAME_OVERHEAD
338 lbz r20,PACAPROCENABLED(r20) /* preserve soft en/disabled */
339 bl transfer_to_handler
340 .long AlignmentException
341 .long ret_from_except
343 /* Program check exception */
347 addi r3,r1,STACK_FRAME_OVERHEAD
348 lbz r20,PACAPROCENABLED(r20) /* preserve soft en/disabled */
349 bl transfer_to_handler
350 .long ProgramCheckException
351 .long ret_from_except
353 /* Floating-point unavailable */
357 lwz r3,PACAKSAVE(r20)
362 li r20,0 /* soft disabled */
363 bl transfer_to_handler /* if from kernel, take a trap */
365 .long ret_from_except
370 lbz r21,PACAPROCENABLED(r20)
375 stb r21,PACALPPACA+LPPACADECRINT(r20)
376 lwz r21,PACADEFAULTDECR(r20)
378 EXCEPTION_PROLOG_EXIT
379 1: EXCEPTION_PROLOG_2
380 addi r3,r1,STACK_FRAME_OVERHEAD
381 li r20,0 /* Soft disabled */
382 bl transfer_to_handler
383 .globl timer_interrupt_intercept
384 timer_interrupt_intercept:
385 .long timer_interrupt
386 .long ret_from_intercept
/*
 * Remaining exception vectors. Anything without a dedicated handler
 * is routed to UnknownException via the STD_EXCEPTION boilerplate
 * (prolog, soft-disable, transfer_to_handler, ret_from_except).
 */
388 STD_EXCEPTION(0xa00, Trap_0a, UnknownException)
389 STD_EXCEPTION(0xb00, Trap_0b, UnknownException)
395 /* Store r3 to the kernel stack */
397 lbz r20,PACAPROCENABLED(r20) /* preserve soft en/disabled */
398 bl transfer_to_handler
400 .long ret_from_except
402 /* Single step - not used on 601 */
403 STD_EXCEPTION(0xd00, SingleStep, SingleStepException)
405 STD_EXCEPTION(0xe00, Trap_0e, UnknownException)
406 STD_EXCEPTION(0xf00, Trap_0f, UnknownException)
408 STD_EXCEPTION(0x1300, Trap_13, InstructionBreakpoint)
410 STD_EXCEPTION(0x1400, SMI, SMIException)
411 STD_EXCEPTION(0x1500, Trap_15, UnknownException)
412 STD_EXCEPTION(0x1600, Trap_16, UnknownException)
413 STD_EXCEPTION(0x1700, Trap_17, TAUException)
414 STD_EXCEPTION(0x1800, Trap_18, UnknownException)
415 STD_EXCEPTION(0x1900, Trap_19, UnknownException)
416 STD_EXCEPTION(0x1a00, Trap_1a, UnknownException)
417 STD_EXCEPTION(0x1b00, Trap_1b, UnknownException)
418 STD_EXCEPTION(0x1c00, Trap_1c, UnknownException)
419 STD_EXCEPTION(0x1d00, Trap_1d, UnknownException)
420 STD_EXCEPTION(0x1e00, Trap_1e, UnknownException)
421 STD_EXCEPTION(0x1f00, Trap_1f, UnknownException)
422 STD_EXCEPTION(0x2000, RunMode, RunModeException)
423 STD_EXCEPTION(0x2100, Trap_21, UnknownException)
424 STD_EXCEPTION(0x2200, Trap_22, UnknownException)
425 STD_EXCEPTION(0x2300, Trap_23, UnknownException)
426 STD_EXCEPTION(0x2400, Trap_24, UnknownException)
427 STD_EXCEPTION(0x2500, Trap_25, UnknownException)
428 STD_EXCEPTION(0x2600, Trap_26, UnknownException)
429 STD_EXCEPTION(0x2700, Trap_27, UnknownException)
430 STD_EXCEPTION(0x2800, Trap_28, UnknownException)
431 STD_EXCEPTION(0x2900, Trap_29, UnknownException)
432 STD_EXCEPTION(0x2a00, Trap_2a, UnknownException)
433 STD_EXCEPTION(0x2b00, Trap_2b, UnknownException)
434 STD_EXCEPTION(0x2c00, Trap_2c, UnknownException)
435 STD_EXCEPTION(0x2d00, Trap_2d, UnknownException)
436 STD_EXCEPTION(0x2e00, Trap_2e, UnknownException)
437 STD_EXCEPTION(0x2f00, Trap_2f, UnknownException)
/*
 * save_regs tail: completes the register save started by the prolog
 * (r21/r22 come back out of the Paca, r20 out of SPRG2).  Then the
 * segment-fault path (ste_fault): soft-disable, bump ste_fault_count,
 * pick the context from current->mm (or kernel context 0 if no mm),
 * and reload the segment registers via set_context.
 */
441 /* This code saves: CTR, XER, DAR, DSISR, SRR0, SRR1, */
442 /* r0, r2-r13, r20-r24 */
443 /* It uses R22 as a scratch register */
445 ld r22,PACAR21(r20) /* Get GPR21 from Paca */
446 stw r22,GPR21(r1) /* Save GPR21 in stackframe */
447 ld r22,PACAR22(r20) /* Get GPR22 from Paca */
448 stw r22,GPR22(r1) /* Save GPR22 in stackframe */
449 stw r23,GPR23(r1) /* Save GPR23 in stackframe */
450 stw r24,GPR24(r1) /* Save GPR24 in stackframe */
451 mfspr r22,SPRG2 /* Get GPR20 from SPRG2 */
452 stw r22,GPR20(r1) /* Save GPR20 in stackframe */
457 lbz r22,PACAPROCENABLED(r20)/* Get soft enabled/disabled */
465 bl set_kernel_segregs
469 stb r4,PACAPROCENABLED(r3) /* Soft disable prevents going to */
470 /* do_pending_int on recursive fault */
472 lis r3,ste_fault_count@ha
473 lwz r4,ste_fault_count@l(r3)
475 stw r4,ste_fault_count@l(r3)
477 mfspr r3,SPRG3 /* get thread */
478 addi r3,r3,-THREAD /* get 'current' */
479 lwz r3,MM(r3) /* get mm */
480 cmpi 0,r3,0 /* if no mm */
481 beq 1f /* then use context 0 (kernel) */
482 lwz r3,CONTEXT(r3) /* get context */
484 /* set_context kills r0, r3, r4 and CTR */
489 beq 5f /* skip checks if restoring disabled */
491 CHECKANYINT(r4,r5,r6) /* if pending interrupts, process them */
495 stb r3,PACAPROCENABLED(r4) /* Restore enabled/disabled */
500 * This code finishes saving the registers to the exception frame
501 * and jumps to the appropriate handler for the exception, turning
502 * on address translation.
504 * At this point r0-r13, r20-r24, CCR, CTR, LINK, XER, DAR and DSISR
505 * are saved on a stack. SRR0 is in r22, SRR1 is in r23
506 * r1 points to the stackframe, r1 points to the kernel stackframe
507 * We no longer have any dependency on data saved in the PACA, SRR0, SRR1
508 * DAR or DSISR. We now copy the registers to the kernel stack (which
509 * might cause little faults). Any little fault will be handled without
510 * saving state. Thus when the little fault is completed, it will rfi
511 * back to the original faulting instruction.
/*
 * transfer_to_handler: finish building the exception frame, record
 * the soft enable/disable state (r20) in the Paca, check for kernel
 * stack overflow, load the handler address and return address from
 * the two words following the call site, then rfi into the handler
 * with the MMU on and MSR_EE always hard-enabled.
 * stack_ovf: on overflow, switch to init_task's stack and call
 * StackOverflow(regs), which should not return.
 */
513 .globl transfer_to_handler
518 stw r7,PACAKSAVE(r6) /* Force new frame for recursive fault */
520 /* Restore the regs used above -- parameters to syscall */
533 mfspr r23,SPRG3 /* if from user, fix up THREAD.regs */
535 addi r24,r1,STACK_FRAME_OVERHEAD
537 2: addi r2,r23,-THREAD /* set r2 to current */
539 stwcx. r22,r22,r1 /* to clear the reservation */
542 mfspr r23,SPRG1 /* Get Paca address */
543 stb r20,PACAPROCENABLED(r23) /* soft enable or disabled */
545 andi. r24,r23,0x3f00 /* get vector offset */
547 addi r24,r2,TASK_STRUCT_SIZE /* check for kernel stack overflow */
551 bgt- stack_ovf /* if r2 < r1 < r2+TASK_STRUCT_SIZE */
552 lwz r24,0(r23) /* virtual address of handler */
553 lwz r23,4(r23) /* where to go when done */
555 ori r20,r20,MSR_EE /* Always hard enabled */
560 RFI /* jump to handler, enable MMU */
563 * On kernel stack overflow, load up an initial stack pointer
564 * and call StackOverflow(regs), which should not return.
567 addi r3,r1,STACK_FRAME_OVERHEAD
568 lis r1,init_task_union@ha
569 addi r1,r1,init_task_union@l
570 addi r1,r1,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
573 stb r20,PACAPROCENABLED(r24) /* soft disable */
574 lis r24,StackOverflow@ha
575 addi r24,r24,StackOverflow@l
577 ori r20,r20,MSR_EE /* Always hard enabled */
584 * Disable FP for the task which had the FPU previously,
585 * and save its floating-point registers in its thread_struct.
586 * Enables the FPU for use in the kernel on return.
587 * On SMP we know the fpu is free, since we give it up every
589 * Assume r20 points to PACA on entry
/*
 * Lazy-FPU support.  Non-SMP: save FPSCR/FP state for
 * last_task_used_math into its thread_struct and clear
 * MSR_FP|MSR_FE0|MSR_FE1 in its saved MSR; then enable those bits for
 * the current task on return.  On SMP, lazy switching is not done
 * (giveup_fpu is called from switch_to instead -- see comment below).
 */
595 MTMSRD(r5) /* enable use of fpu now */
598 * For SMP, we don't do lazy FPU switching because it just gets too
599 * horrendously complex, especially when a task switches from one CPU
600 * to another. Instead we call giveup_fpu in switch_to.
603 lis r3,last_task_used_math@ha
604 lwz r4,last_task_used_math@l(r3)
607 addi r4,r4,THREAD /* want last_task_used_math->thread */
610 stfd fr0,THREAD_FPSCR-4(r4)
612 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
613 li r20,MSR_FP|MSR_FE0|MSR_FE1
614 andc r4,r4,r20 /* disable FP for previous task */
615 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
617 #endif /* CONFIG_SMP */
618 /* enable use of FP after return */
619 ori r23,r23,MSR_FP|MSR_FE0|MSR_FE1
620 mfspr r5,SPRG3 /* current task's THREAD (phys) */
621 lfd fr0,THREAD_FPSCR-4(r5)
626 stw r4,last_task_used_math@l(r3)
627 #endif /* CONFIG_SMP */
628 /* restore registers and return */
635 /* we haven't used ctr or xer */
646 * FP unavailable trap from kernel - print a message, but let
647 * the task use FP in the kernel until it returns to user mode.
652 stw r3,_MSR(r1) /* enable use of FP after return */
655 mr r4,r2 /* current */
659 86: .string "floating point used in kernel (task=%p, pc=%x)\n"
664 * Disable FP for the task given as the argument,
665 * and save the floating-point registers in its thread_struct.
666 * Enables the FPU for use in the kernel on return.
672 mtmsr r5 /* enable use of fpu now */
674 beqlr- /* if no previous owner, done */
675 addi r3,r3,THREAD /* want THREAD of task */
680 stfd fr0,THREAD_FPSCR-4(r3)
682 lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
683 li r3,MSR_FP|MSR_FE0|MSR_FE1
684 andc r4,r4,r3 /* disable FP for previous task */
685 stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
689 lis r4,last_task_used_math@ha
690 stw r5,last_task_used_math@l(r4)
691 #endif /* CONFIG_SMP */
/*
 * Secondary-CPU bring-up continues: run setup_cpu, pick the stack
 * from current_set[cpu#], initialise this CPU's Paca (PACAKSAVE = 0
 * means r1 holds the kernel sp; soft-disabled), then rfi to
 * start_secondary hard-enabled.  Below that, MMU load code fills the
 * 16 segment registers for context 0 (VSID stepped by 0x111 per
 * segment, Ku = 1).
 */
699 bl call_setup_cpu /* Call setup_cpu for this CPU */
702 HMT_MEDIUM /* Set thread priority to MEDIUM */
704 ori r2,r2,current_set@l
705 slwi r24,r24,2 /* get current_set[cpu#] */
709 addi r1,r2,TASK_UNION_SIZE-STACK_FRAME_OVERHEAD
713 /* load up the MMU */
716 /* ptr to phys current thread */
717 addi r4,r2,THREAD /* phys address of our thread_struct */
721 /* Set up address of Paca in current thread */
724 /* r24 has CPU # * 4 at this point. The Paca is 2048 bytes
725 long so multiply r24 by 512 to index into the array of Pacas */
728 rlwinm r23,r23,0,0,31
732 stw r3,PACAKSAVE(r23) /* 0 => r1 has kernel sp */
734 stb r3,PACAPROCENABLED(r23) /* Soft disabled */
736 /* enable MMU and jump to start_secondary */
739 ori r4,r4,MSR_EE /* Hard enabled */
740 lis r3,start_secondary@h
741 ori r3,r3,start_secondary@l
746 #endif /* CONFIG_SMP */
749 * Load stuff into the MMU. Intended to be called with
753 li r0,16 /* load up segment register values */
754 mtctr r0 /* for context 0 */
755 lis r3,0x2000 /* Ku = 1, VSID = 0 */
758 addi r3,r3,0x111 /* increment VSID */
759 addis r4,r4,0x1000 /* address of next segment */
764 * This is where the main kernel code starts.
/*
 * start_here: main kernel entry for processor 0.  Set up init_task's
 * stack and thread, initialise the processor-0 Paca (PACAKSAVE = 0,
 * soft-disabled), push klimit past the embedded System.map, call
 * early_init (with the MMU on), and rfi into start_kernel with
 * MSR_EE hard-enabled.
 */
770 lis r2,init_task_union@h
771 ori r2,r2,init_task_union@l
773 /* Set up for using our exception vectors */
775 addi r4,r2,THREAD /* init task's THREAD */
779 /* Get address of Paca for processor 0 */
782 rlwinm r11,r11,0,0,31
786 stw r3,PACAKSAVE(r11) /* 0 => r1 has kernel sp */
788 stb r3,PACAPROCENABLED(r11) /* Soft disabled */
790 addi r1,r2,TASK_UNION_SIZE
792 stwu r0,-STACK_FRAME_OVERHEAD(r1)
794 /* fix klimit for system map */
795 lis r6,embedded_sysmap_end@ha
796 lwz r7,embedded_sysmap_end@l(r6)
812 * Decide what sort of machine this is and initialize the MMU.
814 bl early_init /* We have to do this with MMU on */
821 li r6,0 /* No cmdline parameters */
828 ori r4,r4,MSR_EE /* Hard enabled */
830 lis r3,start_kernel@h
831 ori r3,r3,start_kernel@l
834 RFI /* ensure correct MSR and jump to
/*
 * hash_page glue: soft-disable, then hard-enable so create_hpte can
 * spin on hash_table_lock safely on a shared processor; afterwards
 * hard-disable again, replay any interrupts that arrived meanwhile
 * (CHECKANYINT), restore the soft state, and count the reload in
 * htab_reloads.
 */
837 mflr r21 /* Save LR in r21 */
840 * We hard enable here (but first soft disable) so that the hash_page
841 * code can spin on the hash_table_lock without problem on a shared
845 stb r0,PACAPROCENABLED(r20) /* Soft disable */
849 mtmsr r0 /* Hard enable */
851 bl create_hpte /* add the hash table entry */
853 * Now go back to hard disabled
859 mtmsr r0 /* Hard disable */
862 mtlr r21 /* restore LR */
/* NOTE(review): comment below says "restore r21" but the move targets
 * r21 from r1 -- verify against the full source. */
863 mr r21,r1 /* restore r21 */
865 cmpi 0,r0,0 /* See if we will soft enable in */
867 beq 5f /* if not, skip checks */
869 CHECKANYINT(r4,r5,r6) /* if pending interrupts, process them */
872 5: stb r0,PACAPROCENABLED(r20) /* Restore soft enable/disable */
874 cmpi 0,r3,0 /* check return code from create_hpte */
878 * htab_reloads counts the number of times we have to fault an
879 * HPTE into the hash table. This should only happen after a
880 * fork (because fork does a flush_tlb_mm) or a vmalloc or ioremap.
881 * Where a page is faulted into a process's address space,
882 * update_mmu_cache gets called to put the HPTE into the hash table
883 * and those are counted as preloads rather than reloads.
885 lis r2,htab_reloads@ha
886 lwz r3,htab_reloads@l(r2)
888 stw r3,htab_reloads@l(r2)
914 * Set up the segment registers for a new context.
/*
 * set_context: derive a VSID from the context number (skew by 897,
 * shift into the VSID field, set Ks/Ku) and load NUM_USER_SEGMENTS
 * segment registers, stepping the VSID by 0x111 per segment.  Then
 * reload segment registers C-F, which the hypervisor may have
 * clobbered on a shared processor.
 */
918 mulli r3,r3,897 /* multiply context by skew factor */
919 rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
920 addis r3,r3,0x6000 /* Set Ks, Ku bits */
921 li r0,NUM_USER_SEGMENTS
927 addi r3,r3,0x111 /* next VSID */
928 rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
929 addis r4,r4,0x1000 /* address of next segment */
936 * Reload the last four segment registers because they
937 * might have been clobbered by the hypervisor if we
938 * are running on a shared processor
940 lis r3,0x2000 /* Set Ku = 1 */
941 addi r3,r3,0xCCC /* Set VSID = CCC */
942 lis r4,0xC000 /* Set SR = C */
943 li r0,4 /* Load regs C, D, E and F */
946 addi r3,r3,0x111 /* increment VSID */
947 addis r4,r4,0x1000 /* address of next segment */
956 * Invoke the iSeries hypervisor (PLIC) via the System Call instruction.
957 * Parameters are passed to this routine in registers r3 - r10 and are
958 * converted to 64-bit by combining registers. eg. r3 <- r3
959 * r4 <- r5,r6, r5 <- r7,r8, r6 <- r9,r10
961 * r3 contains the HV function to be called
962 * r5,r6 contain the first 64-bit operand
963 * r7,r8 contain the second 64-bit operand
964 * r9,r10 contain the third 64-bit operand
965 * caller's stack frame +8 contains the fourth 64-bit operand
966 * caller's stack frame +16 contains the fifth 64-bit operand
967 * caller's stack frame +24 contains the sixth 64-bit operand
968 * caller's stack frame +32 contains the seventh 64-bit operand
982 * Stack a frame and save one reg so we can hang on to
998 * The hypervisor assumes CR fields 0-4 are volatile, but
999 * gcc assumes CR fields 2-7 are non-volatile.
1000 * We must save and restore the CR here
1005 /* Before we can convert to using 64-bit registers we must
1006 * soft disable external interrupts as the interrupt handlers
1007 * don't preserve the high half of the registers
/*
 * HvCall body: soft-disable first (interrupt handlers do not preserve
 * the high halves of the 64-bit registers), pack the 32-bit register
 * pairs into 64-bit operand registers, tag r3 with the function
 * number, mark the call with r0 = -1 and `sc` into the hypervisor.
 * Afterwards split the 64-bit result into r3/r4 and replay any
 * decrementer/external interrupts that arrived during the call.
 */
1010 mfspr r11,SPRG1 /* Get the Paca pointer */
1011 lbz r31,PACAPROCENABLED(r11) /* Get soft enable/disable flag */
1013 stb r0,PACAPROCENABLED(r11) /* Soft disable */
1015 /* Get parameters four through seven */
1025 /* Now it is safe to operate on 64-bit registers
1027 * Format the operands into the 64-bit registers
1030 rldicr r5,r5,32,31 /* r5 = r5 << 32 */
1031 rldicl r6,r6,0,32 /* r6 = r6 & 0x00000000ffffffff */
1032 or r4,r5,r6 /* r4 = r5 | r6 */
1033 rldicr r7,r7,32,31 /* r7 = r7 << 32 */
1034 rldicl r8,r8,0,32 /* r8 = r8 & 0x00000000ffffffff */
1035 or r5,r7,r8 /* r5 = r7 | r8 */
1036 rldicr r9,r9,32,31 /* r9 = r9 << 32 */
1037 rldicl r10,r10,0,32 /* r10 = r10 & 0x00000000ffffffff */
1038 or r6,r9,r10 /* r6 = r9 | r10 */
1039 rldicr r22,r22,32,31 /* r22 = r22 << 32 */
1040 rldicl r23,r23,0,32 /* r23 = r23 & 0x00000000ffffffff */
1041 or r7,r22,r23 /* r7 = r22 | r23 */
1042 rldicr r24,r24,32,31 /* r24 = r24 << 32 */
1043 rldicl r25,r25,0,32 /* r25 = r25 & 0x00000000ffffffff */
1044 or r8,r24,r25 /* r8 = r24 | r25 */
1045 rldicr r26,r26,32,31 /* r26 = r26 << 32 */
1046 rldicl r27,r27,0,32 /* r27 = r27 & 0x00000000ffffffff */
1047 or r9,r26,r27 /* r9 = r26 | r27 */
1048 rldicr r28,r28,32,31 /* r28 = r28 << 32 */
1049 rldicl r29,r29,0,32 /* r29 = r29 & 0x00000000ffffffff */
1050 or r10,r28,r29 /* r10 = r28 | r29 */
1053 * Extract the hypervisor function call number from R3
1054 * and format it into the 64-bit R3.
1056 rldicr r0,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
1057 rldicl r3,r3,0,48 /* r3 = r3 & 0x000000000000ffff */
1058 or r3,r3,r0 /* r3 = r3 | r0 */
1061 * r0 = 0xffffffffffffffff indicates a hypervisor call
1063 li r0,-1 /* r0 = 0xffffffffffffffff */
1065 /* Invoke the hypervisor via the System Call instruction */
1071 /* Return value in 64-bit R3
1072 * format it into R3 and R4
1074 rldicl r4,r3,0,32 /* r4 = r3 & 0x00000000ffffffff */
1075 rldicl r3,r3,32,32 /* r3 = (r3 >> 32) & 0x00000000ffffffff */
1077 /* We are now done with 64-bit registers it is safe to touch
1089 /* While we were running in the hypervisor, a decrementer or
1090 * external interrupt may have occurred. If we are about to
1091 * enable here, we must check for these and process them
1094 cmpi 0,r31,0 /* check if going to enable */
1095 beq 1f /* skip checks if staying disabled */
1097 /* Save r3, r4 and LR */
1103 /* enable and check for decrementers/lpEvents */
1107 /* Restore r3, r4 and LR */
1115 * Unstack the frame and restore r31 and the CR
1124 /* Hypervisor call with return data
1126 * Invoke the iSeries hypervisor (PLIC) via the System Call instruction.
1127 * The Hv function ID is passed in r3
1128 * The address of the return data structure is passed in r4
1129 * Parameters are passed to this routine in registers r5 - r10 and are
1130 * converted to 64-bit by combining registers. eg. r3 <- r3
1131 * r4 <- r5,r6, r5 <- r7,r8, r6 <- r9,r10
1133 * r3 contains the HV function to be called
1134 * r4 contains the address of the return data structure
1135 * r5,r6 contain the first 64-bit operand
1136 * r7,r8 contain the second 64-bit operand
1137 * r9,r10 contain the third 64-bit operand
1138 * caller's stack frame +8 contains the fourth 64-bit operand
1139 * caller's stack frame +16 contains the fifth 64-bit operand
1140 * caller's stack frame +24 contains the sixth 64-bit operand
1141 * caller's stack frame +32 contains the seventh 64-bit operand
/*
 * HvCall*Ret16: same calling sequence as HvCall* above, but r4 names
 * a return-data structure.  The 64-bit results r3/r4 are stored into
 * that structure and the 64-bit r5 is returned split into r3/r4.
 */
1145 _GLOBAL(HvCallRet16)
1146 _GLOBAL(HvCall0Ret16)
1147 _GLOBAL(HvCall1Ret16)
1148 _GLOBAL(HvCall2Ret16)
1149 _GLOBAL(HvCall3Ret16)
1150 _GLOBAL(HvCall4Ret16)
1151 _GLOBAL(HvCall5Ret16)
1152 _GLOBAL(HvCall6Ret16)
1153 _GLOBAL(HvCall7Ret16)
1156 * Stack a frame and save some regs
1171 mr r30,r4 /* Save return data address */
1174 * The hypervisor assumes CR fields 0-4 are volatile, but
1175 * gcc assumes CR fields 2-7 are non-volatile.
1176 * We must save and restore the CR here
1181 /* Before we can convert to using 64-bit registers we must
1182 * soft disable external interrupts as the interrupt handlers
1183 * don't preserve the high half of the registers
1186 mfspr r11,SPRG1 /* Get the Paca pointer */
1187 lbz r31,PACAPROCENABLED(r11) /* Get soft enable/disable flag */
1189 stb r0,PACAPROCENABLED(r11) /* Soft disable */
1191 /* Get parameters four through seven */
1202 /* Now it is safe to operate on 64-bit registers
1207 * Format the operands into the 64-bit registers
1210 rldicr r5,r5,32,31 /* r5 = r5 << 32 */
1211 rldicl r6,r6,0,32 /* r6 = r6 & 0x00000000ffffffff */
1212 or r4,r5,r6 /* r4 = r5 | r6 */
1213 rldicr r7,r7,32,31 /* r7 = r7 << 32 */
1214 rldicl r8,r8,0,32 /* r8 = r8 & 0x00000000ffffffff */
1215 or r5,r7,r8 /* r5 = r7 | r8 */
1216 rldicr r9,r9,32,31 /* r9 = r9 << 32 */
1217 rldicl r10,r10,0,32 /* r10 = r10 & 0x00000000ffffffff */
1218 or r6,r9,r10 /* r6 = r9 | r10 */
1219 rldicr r22,r22,32,31 /* r22 = r22 << 32 */
1220 rldicl r23,r23,0,32 /* r23 = r23 & 0x00000000ffffffff */
1221 or r7,r22,r23 /* r7 = r22 | r23 */
1222 rldicr r24,r24,32,31 /* r24 = r24 << 32 */
1223 rldicl r25,r25,0,32 /* r25 = r25 & 0x00000000ffffffff */
1224 or r8,r24,r25 /* r8 = r24 | r25 */
1225 rldicr r26,r26,32,31 /* r26 = r26 << 32 */
1226 rldicl r27,r27,0,32 /* r27 = r27 & 0x00000000ffffffff */
1227 or r9,r26,r27 /* r9 = r26 | r27 */
1228 rldicr r28,r28,32,31 /* r28 = r28 << 32 */
1229 rldicl r29,r29,0,32 /* r29 = r29 & 0x00000000ffffffff */
1230 or r10,r28,r29 /* r10 = r28 | r29 */
1232 * Extract the hypervisor function call number from R3
1233 * and format it into the 64-bit R3.
1235 rldicr r0,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
1236 rldicl r3,r3,0,48 /* r3 = r3 & 0x000000000000ffff */
1237 or r3,r3,r0 /* r3 = r3 | r0 */
1240 * r0 = 0xffffffffffffffff indicates a hypervisor call
1242 li r0,-1 /* r0 = 0xffffffffffffffff */
1244 /* Invoke the hypervisor via the System Call instruction */
1250 /* Return values in 64-bit R3, R4, R5 and R6
1251 * place R3 and R4 into data structure, R5 into R3,R4
1253 rldicl r6,r3,32,32 /* r6 = (r3 >> 32) & 0x00000000ffffffff */
1254 rldicl r7,r3,0,32 /* r7 = r3 & 0x00000000ffffffff */
1255 rldicl r8,r4,32,32 /* r8 = (r4 >> 32) & 0x00000000ffffffff */
1256 rldicl r9,r4,0,32 /* r9 = r4 & 0x00000000ffffffff */
1258 rldicl r4,r5,0,32 /* r4 = r5 & 0x00000000ffffffff */
1259 rldicl r3,r5,32,32 /* r3 = (r5 >> 32) & 0x00000000ffffffff */
1261 /* We are now done with 64-bit registers it is safe to touch
1264 stw r6,0(r30) /* Save returned data */
1278 /* While we were running in the hypervisor, a decrementer or
1279 * external interrupt may have occurred. If we are about to
1280 * enable here, we must check for these and process them
1283 cmpi 0,r31,0 /* check if going to enable */
1284 beq 1f /* skip checks if staying disabled */
1286 /* Save r3, r4 and LR */
1292 /* enable and check for decrementers/lpEvents */
1303 * Unstack the frame and restore r30, r31 and CR
1314 /* Hypervisor call (use no stack)
1316 * These functions must be called with interrupts soft disabled.
1317 * The caller is responsible for saving the non-volatile CR
1318 * The operands should already be formatted into the 64-bit registers
1320 * Invoke the iSeries hypervisor (PLIC) via the System Call instruction.
1322 * r3 contains the HV function to be called
1323 * r4 contains the first 64-bit operand
1324 * r5 contains the second 64-bit operand
1325 * r6 contains the third 64-bit operand
1326 * r7 contains the fourth 64-bit operand
1327 * r8 contains the fifth 64-bit operand
1328 * r9 contains the sixth 64-bit operand
1329 * r10 contains the seventh 64-bit operand
1331 * data is returned in 64-bit registers r3-r6
1341 * Extract the hypervisor function call number from R3
1342 * and format it into the 64-bit R3.
/*
 * HvCall64 tail: the caller has already formatted 64-bit operands
 * (see comment block above); just tag r3 with the function number and
 * issue `sc` with r0 = -1.  The __setup_cpu_* entry points follow.
 */
1344 rldicr r0,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
1345 rldicl r3,r3,0,48 /* r3 = r3 & 0x000000000000ffff */
1346 or r3,r3,r0 /* r3 = r3 | r0 */
1349 * r0 = 0xffffffffffffffff indicates a hypervisor call
1351 li r0,-1 /* r0 = 0xffffffffffffffff */
1353 /* Invoke the hypervisor via the System Call instruction */
1359 _GLOBAL(__setup_cpu_power3)
1361 _GLOBAL(__setup_cpu_power4)
1363 _GLOBAL(__setup_cpu_generic)
/*
 * iSeries_check_intr: replay decrementer and lpEvent interrupts that
 * fired while we were in the hypervisor.  fake_interrupt and
 * fake_decrementer build a minimal stack frame and call the handler
 * directly from kernel mode.  create_hpte wraps iSeries_create_hpte
 * in a full frame because r3-r13 are caller-saved.
 */
1366 _GLOBAL(iSeries_check_intr)
1372 lbz r5,PACAPROCENABLED(r5)
1375 /* Check for lost decrementer interrupts.
1376 * (If decrementer popped while we were in the hypervisor)
1377 * (calls timer_interrupt if so)
1380 /* Check for pending interrupts. If no interrupts pending,
1381 * then CR0 = "eq" and r4 == 0
1382 * (kills registers r5 and r6)
1385 addi r3,r1,STACK_FRAME_OVERHEAD
1388 CHECKLPQUEUE(r4,r5,r6)
1390 addi r3,r1,STACK_FRAME_OVERHEAD
1398 * Fake an interrupt from kernel mode.
1399 * This is used when enable_irq loses an interrupt.
1400 * We only fill in the stack frame minimally.
1402 _GLOBAL(fake_interrupt)
1405 stwu r1,-INT_FRAME_SIZE(r1)
1412 addi r3,r1,STACK_FRAME_OVERHEAD
1415 addi r1,r1,INT_FRAME_SIZE
1422 * Fake a decrementer from kernel mode.
1423 * This is used when the decrementer pops in
1424 * the hypervisor. We only fill in the stack
1427 _GLOBAL(fake_decrementer)
1430 stwu r1,-INT_FRAME_SIZE(r1)
1437 addi r3,r1,STACK_FRAME_OVERHEAD
1439 addi r1,r1,INT_FRAME_SIZE
1445 _GLOBAL(create_hpte)
1446 stwu r1,-INT_FRAME_SIZE(r1)
1450 /* r3-r13 are caller saved */
1464 bl iSeries_create_hpte
1477 addi r1,r1,INT_FRAME_SIZE
1481 ### extern void abort(void)
1483 ### Invoke the hypervisor to kill the partition.
1490 * We put a few things here that have to be page-aligned.
1491 * This stuff goes at the beginning of the data segment,
1492 * which is page-aligned.
/* Page-aligned data at the start of the data segment (see the
 * comment above): the shared zero page and the initial page table. */
1498 .globl empty_zero_page
1502 .globl swapper_pg_dir
1507 * This space gets a copy of optional info passed to us by the bootstrap
1508 * Used to pass parameters into the kernel like root=/dev/sda1, etc.