2 * arch/ia64/kernel/ivt.S
4 * Copyright (C) 2002-2003 Intel Co
5 * Suresh Siddha <suresh.b.siddha@intel.com>
6 * Kenneth Chen <kenneth.w.chen@intel.com>
7 * Fenghua Yu <fenghua.yu@intel.com>
8 * Copyright (C) 1998-2001 Hewlett-Packard Co
9 * Stephane Eranian <eranian@hpl.hp.com>
10 * David Mosberger <davidm@hpl.hp.com>
12 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling for SMP
13 * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB handler now uses virtual PT.
16 * This file defines the interruption vector table used by the CPU.
17 * It does not include one entry per possible cause of interruption.
19 * The first 20 entries of the table contain 64 bundles each while the
20 * remaining 48 entries contain only 16 bundles each.
22 * The 64 bundles are used to allow inlining the whole handler for critical
23 * interruptions like TLB misses.
25 * For each entry, the comment is as follows:
27 * // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
28 * entry offset ----/ / / / /
29 * entry number ---------/ / / /
30 * size of the entry -------------/ / /
31 * vector name -------------------------------------/ /
32 * interruptions triggering this vector ----------------------/
34 * The table is 32KB in size and must be aligned on 32KB boundary.
35 * (The CPU ignores the 15 lower bits of the address)
37 * Table is based upon EAS2.6 (Oct 1999)
40 #include <linux/config.h>
42 #include <asm/asmmacro.h>
43 #include <asm/break.h>
44 #include <asm/kregs.h>
45 #include <asm/offsets.h>
46 #include <asm/pgtable.h>
47 #include <asm/processor.h>
48 #include <asm/ptrace.h>
49 #include <asm/system.h>
50 #include <asm/unistd.h>
51 #include <asm/errno.h>
54 # define PSR_DEFAULT_BITS psr.ac
56 # define PSR_DEFAULT_BITS 0
61 * This lets you track the last eight faults that occurred on the CPU. Make sure ar.k2 isn't
62 * needed for something else before enabling this...
64 # define DBG_FAULT(i) mov r16=ar.k2;; shl r16=r16,8;; add r16=(i),r16;;mov ar.k2=r16
69 #define MINSTATE_VIRT /* needed by minstate.h */
74 mov r19=n;; /* prepare to save predicates */ \
75 br.sptk.many dispatch_to_fault_handler
78 * As we don't (hopefully) use the space available, we need to fill it with
79 * nops. the parameter may be used for debugging and is representing the entry
82 #define BREAK_BUNDLE(a) break.m (a); \
86 * 4 break bundles all together
88 #define BREAK_BUNDLE4(a); BREAK_BUNDLE(a); BREAK_BUNDLE(a); BREAK_BUNDLE(a); BREAK_BUNDLE(a)
91 * 8 bundles all together (too lazy to use only 4 at a time !)
93 #define BREAK_BUNDLE8(a); BREAK_BUNDLE4(a); BREAK_BUNDLE4(a)
95 .section .text.ivt,"ax"
97 .align 32768 // align on 32KB boundary
100 /////////////////////////////////////////////////////////////////////////////////////////
101 // 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
105 * The VHPT vector is invoked when the TLB entry for the virtual page table
106 * is missing. This happens only as a result of a previous
107 * (the "original") TLB miss, which may either be caused by an instruction
108 * fetch or a data access (or non-access).
110 * What we do here is normal TLB miss handling for the _original_ miss, followed
111 * by inserting the TLB entry for the virtual page table page that the VHPT
112 * walker was attempting to access. The latter gets inserted as long
113 * as both L1 and L2 have valid mappings for the faulting address.
114 * The TLB entry for the original miss gets inserted only if
115 * the L3 entry indicates that the page is present.
117 * do_page_fault gets invoked in the following cases:
118 * - the faulting virtual address uses unimplemented address bits
119 * - the faulting virtual address has no L1, L2, or L3 mapping
121 mov r16=cr.ifa // get address that caused the TLB miss
122 #ifdef CONFIG_HUGETLB_PAGE
127 rsm psr.dt // use physical addressing for data
128 mov r31=pr // save the predicate registers
129 mov r19=IA64_KR(PT_BASE) // get page table base address
130 shl r21=r16,3 // shift bit 60 into sign bit
131 shr.u r17=r16,61 // get the region number into r17
134 #ifdef CONFIG_HUGETLB_PAGE
137 cmp.eq p8,p0=HPAGE_SHIFT,r26
139 (p8) dep r25=r18,r25,2,6
140 (p8) shr r22=r22,HPAGE_SHIFT-PAGE_SHIFT
143 cmp.eq p6,p7=5,r17 // is IFA pointing into to region 5?
144 shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of the faulting address
146 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
147 srlz.d // ensure "rsm psr.dt" has taken effect
148 (p6) movl r19=__pa(swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
149 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
150 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
152 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
153 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
154 cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
155 shr.u r18=r22,PMD_SHIFT // shift L2 index into position
157 ld8 r17=[r17] // fetch the L1 entry (may be 0)
159 (p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
160 dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table entry
162 (p7) ld8 r20=[r17] // fetch the L2 entry (may be 0)
163 shr.u r19=r22,PAGE_SHIFT // shift L3 index into position
165 (p7) cmp.eq.or.andcm p6,p7=r20,r0 // was L2 entry NULL?
166 dep r21=r19,r20,3,(PAGE_SHIFT-3) // compute address of L3 page table entry
168 (p7) ld8 r18=[r21] // read the L3 PTE
169 mov r19=cr.isr // cr.isr bit 0 tells us if this is an insn miss
171 (p7) tbit.z p6,p7=r18,_PAGE_P_BIT // page present bit cleared?
172 mov r22=cr.iha // get the VHPT address that caused the TLB miss
173 ;; // avoid RAW on p7
174 (p7) tbit.nz.unc p10,p11=r19,32 // is it an instruction TLB miss?
175 dep r23=0,r20,0,PAGE_SHIFT // clear low bits to get page address
177 (p10) itc.i r18 // insert the instruction TLB entry
178 (p11) itc.d r18 // insert the data TLB entry
179 (p6) br.cond.spnt.many page_fault // handle bad address/page not present (page fault)
182 #ifdef CONFIG_HUGETLB_PAGE
183 (p8) mov cr.itir=r25 // change to default page-size for VHPT
187 * Now compute and insert the TLB entry for the virtual page table. We never
188 * execute in a page table page so there is no need to set the exception deferral
191 adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
197 * Re-check L2 and L3 pagetable. If they changed, we may have received a ptc.g
198 * between reading the pagetable and the "itc". If so, flush the entry we
199 * inserted and retry.
201 ld8 r25=[r21] // read L3 PTE again
202 ld8 r26=[r17] // read L2 entry again
204 cmp.ne p6,p7=r26,r20 // did L2 entry change
205 mov r27=PAGE_SHIFT<<2
207 (p6) ptc.l r22,r27 // purge PTE page translation
208 (p7) cmp.ne.or.andcm p6,p7=r25,r18 // did L3 PTE change
210 (p6) ptc.l r16,r27 // purge translation
213 mov pr=r31,-1 // restore predicate registers
218 /////////////////////////////////////////////////////////////////////////////////////////
219 // 0x0400 Entry 1 (size 64 bundles) ITLB (21)
223 * The ITLB handler accesses the L3 PTE via the virtually mapped linear
224 * page table. If a nested TLB miss occurs, we switch into physical
225 * mode, walk the page table, and then re-execute the L3 PTE read
226 * and go on normally after that.
228 mov r16=cr.ifa // get virtual address
229 mov r29=b0 // save b0
230 mov r31=pr // save predicates
232 mov r17=cr.iha // get virtual address of L3 PTE
233 movl r30=1f // load nested fault continuation point
235 1: ld8 r18=[r17] // read L3 PTE
238 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
239 (p6) br.cond.spnt page_fault
244 ld8 r19=[r17] // read L3 PTE again and see if same
245 mov r20=PAGE_SHIFT<<2 // setup page size for purge
256 /////////////////////////////////////////////////////////////////////////////////////////
257 // 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
261 * The DTLB handler accesses the L3 PTE via the virtually mapped linear
262 * page table. If a nested TLB miss occurs, we switch into physical
263 * mode, walk the page table, and then re-execute the L3 PTE read
264 * and go on normally after that.
266 mov r16=cr.ifa // get virtual address
267 mov r29=b0 // save b0
268 mov r31=pr // save predicates
270 mov r17=cr.iha // get virtual address of L3 PTE
271 movl r30=1f // load nested fault continuation point
273 1: ld8 r18=[r17] // read L3 PTE
276 tbit.z p6,p0=r18,_PAGE_P_BIT // page present bit cleared?
277 (p6) br.cond.spnt page_fault
282 ld8 r19=[r17] // read L3 PTE again and see if same
283 mov r20=PAGE_SHIFT<<2 // setup page size for purge
294 /////////////////////////////////////////////////////////////////////////////////////////
295 // 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
298 mov r16=cr.ifa // get address that caused the TLB miss
301 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
304 #ifdef CONFIG_DISABLE_VHPT
305 shr.u r22=r16,61 // get the region number into r22
307 cmp.gt p8,p0=6,r22 // user mode
312 (p8) mov r29=b0 // save b0
313 (p8) br.cond.dptk itlb_fault
315 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
316 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
317 shr.u r18=r16,57 // move address bit 61 to bit 4
319 andcm r18=0x10,r18 // bit 4=~address-bit(61)
320 cmp.ne p8,p0=r0,r23 // psr.cpl != 0?
321 or r19=r17,r19 // insert PTE control bits into r19
323 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
// User-level (psr.cpl != 0) accesses never get an identity mapping here;
// they go through the full page-fault path instead.
324 (p8) br.cond.spnt page_fault
326 itc.i r19 // insert the TLB entry
332 /////////////////////////////////////////////////////////////////////////////////////////
333 // 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
336 mov r16=cr.ifa // get address that caused the TLB miss
339 movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
343 #ifdef CONFIG_DISABLE_VHPT
344 shr.u r22=r16,61 // get the region number into r22
346 cmp.gt p8,p0=6,r22 // access to region 0-5
351 (p8) mov r29=b0 // save b0
352 (p8) br.cond.dptk dtlb_fault
354 extr.u r23=r21,IA64_PSR_CPL0_BIT,2 // extract psr.cpl
355 and r22=IA64_ISR_CODE_MASK,r20 // get the isr.code field
356 tbit.nz p6,p7=r20,IA64_ISR_SP_BIT // is speculation bit on?
357 shr.u r18=r16,57 // move address bit 61 to bit 4
358 and r19=r19,r16 // clear ed, reserved bits, and PTE control bits
359 tbit.nz p9,p0=r20,IA64_ISR_NA_BIT // is non-access bit on?
361 andcm r18=0x10,r18 // bit 4=~address-bit(61)
// Non-access lfetch faults (p9 && isr.code == LFETCH) are treated like
// speculative accesses: p6 selects the "defer" path below, p7 the insert path.
363 (p9) cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22 // check isr.code field
364 (p8) br.cond.spnt page_fault
366 dep r21=-1,r21,IA64_PSR_ED_BIT,1 // set the exception-deferral bit in the saved psr
367 or r19=r19,r17 // insert PTE control bits into r19
369 or r19=r19,r18 // set bit 4 (uncached) if the access was to region 6
372 (p7) itc.d r19 // insert the TLB entry
377 //-----------------------------------------------------------------------------------
378 // call do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
385 alloc r15=ar.pfs,0,0,3,0
388 adds r3=8,r2 // set up second base pointer
390 ssm psr.ic | PSR_DEFAULT_BITS
392 srlz.i // guarantee that interruption collection is on
394 (p15) ssm psr.i // restore psr.i
// Return path: the C handler comes back through ia64_leave_kernel.
395 movl r14=ia64_leave_kernel
400 adds out2=16,r12 // out2 = pointer to pt_regs
401 br.call.sptk.many b6=ia64_do_page_fault // ignore return address
405 /////////////////////////////////////////////////////////////////////////////////////////
406 // 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
407 ENTRY(nested_dtlb_miss)
409 * In the absence of kernel bugs, we get here when the virtually mapped linear
410 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
411 * Access-bit, or Data Access-bit faults). If the DTLB entry for the virtual page
412 * table is missing, a nested TLB miss fault is triggered and control is
413 * transferred to this point. When this happens, we lookup the pte for the
414 * faulting address by walking the page table in physical mode and return to the
415 * continuation point passed in register r30 (or call page_fault if the address is
418 * Input: r16: faulting address
420 * r30: continuation address
423 * Output: r17: physical address of L3 PTE of faulting address
425 * r30: continuation address
428 * Clobbered: b0, r18, r19, r21, psr.dt (cleared)
// NOTE: this three-level walk (L1 pgd -> L2 pmd -> L3 pte) mirrors the one in
// the VHPT translation handler at the top of this file; keep the two in sync.
430 rsm psr.dt // switch to using physical data addressing
431 mov r19=IA64_KR(PT_BASE) // get the page table base address
432 shl r21=r16,3 // shift bit 60 into sign bit
434 shr.u r17=r16,61 // get the region number into r17
436 cmp.eq p6,p7=5,r17 // is faulting address in region 5?
437 shr.u r18=r16,PGDIR_SHIFT // get bits 33-63 of faulting address
439 (p7) dep r17=r17,r19,(PAGE_SHIFT-3),3 // put region number bits in place
441 (p6) movl r19=__pa(swapper_pg_dir) // region 5 is rooted at swapper_pg_dir
442 (p6) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
443 (p7) shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
445 (p6) dep r17=r18,r19,3,(PAGE_SHIFT-3) // r17=PTA + IFA(33,42)*8
446 (p7) dep r17=r18,r17,3,(PAGE_SHIFT-6) // r17=PTA + (((IFA(61,63) << 7) | IFA(33,39))*8)
447 cmp.eq p7,p6=0,r21 // unused address bits all zeroes?
448 shr.u r18=r16,PMD_SHIFT // shift L2 index into position
450 ld8 r17=[r17] // fetch the L1 entry (may be 0)
452 (p7) cmp.eq p6,p7=r17,r0 // was L1 entry NULL?
453 dep r17=r18,r17,3,(PAGE_SHIFT-3) // compute address of L2 page table entry
455 (p7) ld8 r17=[r17] // fetch the L2 entry (may be 0)
456 shr.u r19=r16,PAGE_SHIFT // shift L3 index into position
458 (p7) cmp.eq.or.andcm p6,p7=r17,r0 // was L2 entry NULL?
459 dep r17=r19,r17,3,(PAGE_SHIFT-3) // compute address of L3 page table entry
// p6 accumulates every failure case (bad address bits, NULL L1 or L2 entry).
460 (p6) br.cond.spnt page_fault
462 br.sptk.many b0 // return to continuation point
463 END(nested_dtlb_miss)
466 /////////////////////////////////////////////////////////////////////////////////////////
467 // 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
474 /////////////////////////////////////////////////////////////////////////////////////////
475 // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
482 /////////////////////////////////////////////////////////////////////////////////////////
483 // 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
487 * What we do here is to simply turn on the dirty bit in the PTE. We need to
488 * update both the page-table and the TLB entry. To efficiently access the PTE,
489 * we address it through the virtual page table. Most likely, the TLB entry for
490 * the relevant virtual page table page is still present in the TLB so we can
491 * normally do this without additional TLB misses. In case the necessary virtual
492 * page table TLB entry isn't present, we take a nested TLB miss hit where we look
493 * up the physical address of the L3 PTE and then continue at label 1 below.
495 mov r16=cr.ifa // get the address that caused the fault
496 movl r30=1f // load continuation point in case of nested fault
498 thash r17=r16 // compute virtual address of L3 PTE
499 mov r29=b0 // save b0 in case of nested fault
500 mov r31=pr // save pr
502 mov r28=ar.ccv // save ar.ccv
505 ;; // avoid RAW on r18
506 mov ar.ccv=r18 // set compare value for cmpxchg
507 or r25=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
509 cmpxchg8.acq r26=[r17],r25,ar.ccv
510 mov r24=PAGE_SHIFT<<2
514 (p6) itc.d r25 // install updated PTE
516 ld8 r18=[r17] // read PTE again
518 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
521 mov b0=r29 // restore b0
526 ;; // avoid RAW on r18
527 or r18=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
528 mov b0=r29 // restore b0
530 st8 [r17]=r18 // store back updated PTE
531 itc.d r18 // install updated PTE
533 mov pr=r31,-1 // restore pr
538 /////////////////////////////////////////////////////////////////////////////////////////
539 // 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
542 // Like Entry 8, except for instruction access
543 mov r16=cr.ifa // get the address that caused the fault
544 movl r30=1f // load continuation point in case of nested fault
545 mov r31=pr // save predicates
546 #ifdef CONFIG_ITANIUM
548 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
553 tbit.z p6,p0=r17,IA64_PSR_IS_BIT // IA64 instruction set?
555 (p6) mov r16=r18 // if so, use cr.iip instead of cr.ifa
556 #endif /* CONFIG_ITANIUM */
558 thash r17=r16 // compute virtual address of L3 PTE
559 mov r29=b0 // save b0 in case of nested fault
561 mov r28=ar.ccv // save ar.ccv
// SMP path: update the accessed bit atomically so a concurrent ptc.g/update
// on another CPU cannot be lost between our load and store.
565 mov ar.ccv=r18 // set compare value for cmpxchg
566 or r25=_PAGE_A,r18 // set the accessed bit
568 cmpxchg8.acq r26=[r17],r25,ar.ccv
569 mov r24=PAGE_SHIFT<<2
573 (p6) itc.i r25 // install updated PTE
575 ld8 r18=[r17] // read PTE again
577 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
580 mov b0=r29 // restore b0
582 #else /* !CONFIG_SMP */
// UP path: no other CPU can race us, so a plain read-modify-write suffices.
586 or r18=_PAGE_A,r18 // set the accessed bit
587 mov b0=r29 // restore b0
589 st8 [r17]=r18 // store back updated PTE
590 itc.i r18 // install updated PTE
591 #endif /* !CONFIG_SMP */
597 /////////////////////////////////////////////////////////////////////////////////////////
598 // 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
601 // Like Entry 8, except for data access
602 mov r16=cr.ifa // get the address that caused the fault
603 movl r30=1f // load continuation point in case of nested fault
605 thash r17=r16 // compute virtual address of L3 PTE
607 mov r29=b0 // save b0 in case of nested fault
609 mov r28=ar.ccv // save ar.ccv
612 ;; // avoid RAW on r18
613 mov ar.ccv=r18 // set compare value for cmpxchg
614 or r25=_PAGE_A,r18 // set the accessed bit
616 cmpxchg8.acq r26=[r17],r25,ar.ccv
617 mov r24=PAGE_SHIFT<<2
621 (p6) itc.d r25 // install updated PTE
623 ld8 r18=[r17] // read PTE again
625 cmp.eq p6,p7=r18,r25 // is it same as the newly installed
632 ;; // avoid RAW on r18
633 or r18=_PAGE_A,r18 // set the accessed bit
635 st8 [r17]=r18 // store back updated PTE
636 itc.d r18 // install updated PTE
638 mov b0=r29 // restore b0
644 /////////////////////////////////////////////////////////////////////////////////////////
645 // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
648 /* System call entry/exit only saves/restores part of pt_regs, i.e. no scratch registers
649 * are saved/restored except r15 which contains syscall number and needs to be saved in the
650 * entry. This optimization is based on the assumption that applications only call glibc
651 * system call interface which doesn't use scratch registers after break into kernel.
652 * Registers saved/restored during system call entry/exit are listed as follows:
654 * Registers to be saved & restored:
655 * CR registers: cr_ipsr, cr_iip, cr_ifs
656 * AR registers: ar_unat, ar_pfs, ar_rsc, ar_rnat, ar_bspstore, ar_fpsr
657 * others: pr, b0, loadrs, r1, r12, r13, r15
658 * Registers to be restored only:
659 * r8~r11: output value from the system call.
661 * During system call exit, scratch registers (including r15) are modified/cleared to
662 * prevent leaking bits from kernel to user level.
666 mov r17=__IA64_BREAK_SYSCALL
667 mov r31=pr // prepare to save predicates
669 cmp.eq p0,p7=r16,r17 // is this a system call? (p7 <- false, if so)
670 (p7) br.cond.spnt non_syscall
679 mov r1=IA64_KR(CURRENT); /* r1 = current (physical) */
683 /* adjust return address so we skip over the break instruction: */
685 extr.u r8=r29,41,2 // extract ei field from cr.ipsr
686 extr.u r16=r29,32,2; /* extract psr.cpl */
688 cmp.eq p6,p7=2,r8 // isr.ei==2?
689 cmp.eq pKern,pUser=r0,r16; /* are we in kernel mode already? (psr.cpl==0) */
691 (p6) mov r8=0 // clear ei to 0
692 (p6) adds r28=16,r28 // switch cr.iip to next bundle cr.ipsr.ei wrapped
693 (p7) adds r8=1,r8 // increment ei to next slot
695 dep r29=r8,r29,41,2 // insert new ei into cr.ipsr
698 /* switch from user to kernel RBS: */
700 MINSTATE_START_SAVE_MIN_VIRT
701 br.call.sptk.many b7=ia64_syscall_setup
703 // p10==true means out registers are more than 8 or r15's Nat is true
704 (p10) br.cond.spnt.many ia64_ret_from_syscall
706 adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
707 adds r2=IA64_TASK_PTRACE_OFFSET,r13 // r2 = ¤t->ptrace
709 cmp.geu p6,p7=r3,r15 // (syscall > 0 && syscall <= 1024+255) ?
710 movl r16=sys_call_table
712 (p6) shladd r16=r15,3,r16
713 movl r15=ia64_ret_from_syscall
714 (p7) adds r16=(__NR_ni_syscall-1024)*8,r16 // force __NR_ni_syscall
716 ld8 r16=[r16] // load address of syscall entry point
717 mov rp=r15 // set the real return addr
719 ld8 r2=[r2] // r2 = current->ptrace
723 tbit.z p8,p0=r2,PT_TRACESYS_BIT // (current->ptrace & PF_TRACESYS) == 0?
725 (p8) br.call.sptk.many b6=b6 // ignore this return addr
727 br.cond.sptk ia64_trace_syscall
733 /////////////////////////////////////////////////////////////////////////////////////////
734 // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
737 mov r31=pr // prepare to save predicates
739 SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
740 ssm psr.ic | PSR_DEFAULT_BITS
742 adds r3=8,r2 // set up second base pointer for SAVE_REST
743 srlz.i // ensure everybody knows psr.ic is back on
747 alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
748 mov out0=cr.ivr // pass cr.ivr as first arg
749 add out1=16,sp // pass pointer to pt_regs as second arg
751 srlz.d // make sure we see the effect of cr.ivr
752 movl r14=ia64_leave_kernel
755 br.call.sptk.many b6=ia64_handle_irq
759 /////////////////////////////////////////////////////////////////////////////////////////
760 // 0x3400 Entry 13 (size 64 bundles) Reserved
765 /////////////////////////////////////////////////////////////////////////////////////////
766 // 0x3800 Entry 14 (size 64 bundles) Reserved
771 * There is no particular reason for this code to be here, other than that
772 * there happens to be space here that would go unused otherwise. If this
773 * fault ever gets "unreserved", simply moved the following code to a more
776 * ia64_syscall_setup() is a separate subroutine so that it can
777 * allocate stacked registers so it can safely demine any
778 * potential NaT values from the input registers.
781 * - executing on bank 0 or bank 1 register set (doesn't matter)
782 * - r1: stack pointer
783 * - r2: current task pointer
785 * - r12: original contents (sp to be saved)
786 * - r13: original contents (tp to be saved)
787 * - r15: original contents (syscall # to be saved)
788 * - r18: saved bsp (after switching to kernel stack)
789 * - r20: saved r1 (gp)
790 * - r21: saved ar.fpsr
791 * - r22: kernel's register backing store base (krbs_base)
792 * - r23: saved ar.bspstore
793 * - r24: saved ar.rnat
794 * - r25: saved ar.unat
795 * - r26: saved ar.pfs
796 * - r27: saved ar.rsc
797 * - r28: saved cr.iip
798 * - r29: saved cr.ipsr
800 * - b0: original contents (to be saved)
802 * - executing on bank 1 registers
803 * - psr.ic enabled, interrupts restored
805 * - r3: preserved (same as on entry)
806 * - r8: -EINVAL if p10 is true
807 * - r12: points to kernel stack
808 * - r13: points to current task
809 * - p10: TRUE if syscall is invoked with more than 8 out
810 * registers or r15's Nat is true
811 * - p15: TRUE if interrupts need to be re-enabled
812 * - ar.fpsr: set to kernel settings
814 ENTRY(ia64_syscall_setup)
// Saves the minimal syscall-entry state into pt_regs via two interleaved base
// pointers (r16/r17, bumped by 16 each store). The full register contract
// (inputs r1/r2, r12-r29, b0; outputs r3, r8, r12, r13, p10, p15) is
// documented in the comment block immediately above this routine.
815 alloc r19=ar.pfs,8,0,0,0
817 add r16=PT(CR_IPSR),r1 /* initialize first base pointer */
819 st8 [r16]=r29,16; /* save cr.ipsr */
820 adds r17=PT(CR_IIP),r1; /* initialize second base pointer */
824 st8 [r17]=r28,16; /* save cr.iip */
826 (pKern) mov r18=r0; /* make sure r18 isn't NaT */
827 extr.u r11=r19,7,7 /* get sol of ar.pfs */
828 and r8=0x7f,r19 /* get sof of ar.pfs */
832 st8 [r16]=r30,16; /* save cr.ifs */
833 st8 [r17]=r25,16; /* save ar.unat */
834 (pUser) sub r18=r18,r22; /* r18=RSE.ndirty*8 */
837 st8 [r16]=r26,16; /* save ar.pfs */
838 st8 [r17]=r27,16; /* save ar.rsc */
839 tbit.nz p15,p0=r29,IA64_PSR_I_BIT
840 ;; /* avoid RAW on r16 & r17 */
844 (pKern) adds r16=16,r16; /* skip over ar_rnat field */
845 (pKern) adds r17=16,r17; /* skip over ar_bspstore field */
846 shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */
850 (pUser) st8 [r16]=r24,16; /* save ar.rnat */
851 (pUser) st8 [r17]=r23,16; /* save ar.bspstore */
855 st8 [r16]=r31,16; /* save predicates */
856 st8 [r17]=r28,16; /* save b0 */
859 st8 [r16]=r18,16; /* save ar.rsc value for "loadrs" */
860 st8.spill [r17]=r20,16; /* save original r1 */
861 adds r2=IA64_PT_REGS_R16_OFFSET,r1;
865 .mem.offset 0,0; st8.spill [r16]=r12,16;
866 .mem.offset 8,0; st8.spill [r17]=r13,16;
867 cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */
871 .mem.offset 0,0; st8 [r16]=r21,16; /* ar.fpsr */
872 .mem.offset 8,0; st8.spill [r17]=r15,16;
873 adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */
875 cmp.lt p10,p9=r11,r8 /* frame size can't be more than local+8 */
876 mov r13=IA64_KR(CURRENT); /* establish `current' */
877 movl r1=__gp; /* establish kernel global pointer */
879 MINSTATE_END_SAVE_MIN_VIRT
881 (p9) tnat.nz p10,p0=r15 /* p10 is also set if the syscall # in r15 is NaT */
883 ssm psr.ic | PSR_DEFAULT_BITS
884 movl r17=FPSR_DEFAULT
885 adds r8=(IA64_PT_REGS_R8_OFFSET-IA64_PT_REGS_R16_OFFSET),r2
887 srlz.i // guarantee that interruption collection is on
888 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
889 (p15) ssm psr.i // restore psr.i
891 stf8 [r8]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
894 END(ia64_syscall_setup)
897 /////////////////////////////////////////////////////////////////////////////////////////
898 // 0x3c00 Entry 15 (size 64 bundles) Reserved
903 * Squatting in this space ...
905 * This special case dispatcher for illegal operation faults allows preserved
906 * registers to be modified through a callback function (asm only) that is handed
907 * back from the fault handler in r8. Up to three arguments can be passed to the
908 * callback function by returning an aggregate with the callback as its first
909 * element, followed by the arguments.
911 ENTRY(dispatch_illegal_op_fault)
// Dispatcher for illegal-operation faults. Per the comment block above, the C
// handler (ia64_illegal_op_fault) may return a callback in r8; if so (p6),
// that callback is invoked via b6 and itself returns to ia64_leave_kernel.
913 ssm psr.ic | PSR_DEFAULT_BITS
915 srlz.i // guarantee that interruption collection is on
917 (p15) ssm psr.i // restore psr.i
918 adds r3=8,r2 // set up second base pointer for SAVE_REST
920 alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
925 br.call.sptk.many rp=ia64_illegal_op_fault
927 alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
931 movl r15=ia64_leave_kernel
937 (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
938 br.sptk.many ia64_leave_kernel
939 END(dispatch_illegal_op_fault)
942 /////////////////////////////////////////////////////////////////////////////////////////
943 // 0x4000 Entry 16 (size 64 bundles) Reserved
948 /////////////////////////////////////////////////////////////////////////////////////////
949 // 0x4400 Entry 17 (size 64 bundles) Reserved
956 // There is no particular reason for this code to be here, other than that
957 // there happens to be space here that would go unused otherwise. If this
958 // fault ever gets "unreserved", simply moved the following code to a more
961 alloc r14=ar.pfs,0,0,2,0
964 adds r3=8,r2 // set up second base pointer for SAVE_REST
966 ssm psr.ic | PSR_DEFAULT_BITS
968 srlz.i // guarantee that interruption collection is on
970 (p15) ssm psr.i // restore psr.i
971 movl r15=ia64_leave_kernel
976 br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
980 /////////////////////////////////////////////////////////////////////////////////////////
981 // 0x4800 Entry 18 (size 64 bundles) Reserved
986 * There is no particular reason for this code to be here, other than that
987 * there happens to be space here that would go unused otherwise. If this
988 * fault ever gets "unreserved", simply moved the following code to a more
992 ENTRY(dispatch_unaligned_handler)
// Dispatcher for unaligned-reference faults: re-enables interruption
// collection, then hands off to ia64_prepare_handle_unaligned with the
// return path set to ia64_leave_kernel (r14).
995 alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
999 ssm psr.ic | PSR_DEFAULT_BITS
1001 srlz.i // guarantee that interruption collection is on
1003 (p15) ssm psr.i // restore psr.i
1004 adds r3=8,r2 // set up second base pointer
1007 movl r14=ia64_leave_kernel
1010 br.sptk.many ia64_prepare_handle_unaligned
1011 END(dispatch_unaligned_handler)
1014 /////////////////////////////////////////////////////////////////////////////////////////
1015 // 0x4c00 Entry 19 (size 64 bundles) Reserved
1020 * There is no particular reason for this code to be here, other than that
1021 * there happens to be space here that would go unused otherwise. If this
1022 * fault ever gets "unreserved", simply moved the following code to a more
1026 ENTRY(dispatch_to_fault_handler)
// Generic fault dispatcher: saves minimal state (with r19 preserved as the
// vector number) and calls the C handler ia64_fault, which returns through
// ia64_leave_kernel (r14).
1030 * r19: fault vector number (e.g., 24 for General Exception)
1031 * r31: contains saved predicates (pr)
1033 SAVE_MIN_WITH_COVER_R19
1034 alloc r14=ar.pfs,0,0,5,0
1041 ssm psr.ic | PSR_DEFAULT_BITS
1043 srlz.i // guarantee that interruption collection is on
1045 (p15) ssm psr.i // restore psr.i
1046 adds r3=8,r2 // set up second base pointer for SAVE_REST
1049 movl r14=ia64_leave_kernel
1052 br.call.sptk.many b6=ia64_fault
1053 END(dispatch_to_fault_handler)
1056 // --- End of long entries, Beginning of short entries
1060 /////////////////////////////////////////////////////////////////////////////////////////
1061 // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
1062 ENTRY(page_not_present)
1067 * The Linux page fault handler doesn't expect non-present pages to be in
1068 * the TLB. Flush the existing entry now, so we meet that expectation.
1070 mov r17=PAGE_SHIFT<<2 // purge page-size parameter (ps<<2) — presumably consumed by a ptc purge elided from this view; confirm against full source
1076 br.sptk.many page_fault
1077 END(page_not_present)
1080 /////////////////////////////////////////////////////////////////////////////////////////
1081 // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
1082 ENTRY(key_permission)
// Key-permission faults (13,25,52) are handed to the generic page-fault path.
1089 br.sptk.many page_fault
1093 /////////////////////////////////////////////////////////////////////////////////////////
1094 // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
1095 ENTRY(iaccess_rights)
// Instruction access-rights faults (26) are handed to the generic page-fault path.
1102 br.sptk.many page_fault
1106 /////////////////////////////////////////////////////////////////////////////////////////
1107 // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
1108 ENTRY(daccess_rights)
// Data access-rights faults (14,53) are handed to the generic page-fault path.
1115 br.sptk.many page_fault
1119 /////////////////////////////////////////////////////////////////////////////////////////
1120 // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
1121 ENTRY(general_exception)
// Illegal-operation faults (p6, set by an elided test on cr.isr) get the
// special callback-capable dispatcher; everything else goes to the generic
// fault handler as vector 24.
1127 (p6) br.sptk.many dispatch_illegal_op_fault
1129 mov r19=24 // fault number
1130 br.sptk.many dispatch_to_fault_handler
1131 END(general_exception)
1134 /////////////////////////////////////////////////////////////////////////////////////////
1135 // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
1136 ENTRY(disabled_fp_reg)
// Clear psr.dfh so the high FP register set is accessible, then let the
// generic fault dispatcher handle the rest.
1138 rsm psr.dfh // ensure we can access fph
1143 br.sptk.many dispatch_to_fault_handler
1144 END(disabled_fp_reg)
1147 /////////////////////////////////////////////////////////////////////////////////////////
1148 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
1149 ENTRY(nat_consumption)
// NaT-consumption faults (11,23,37,50); body elided from this view.
1152 END(nat_consumption)
1155 /////////////////////////////////////////////////////////////////////////////////////////
1156 // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
1157 ENTRY(speculation_vector)
1160 * A [f]chk.[as] instruction needs to take the branch to the recovery code but
1161 * this part of the architecture is not implemented in hardware on some CPUs, such
1162 * as Itanium. Thus, in general we need to emulate the behavior. IIM contains
1163 * the relative target (not yet sign extended). So after sign extending it we
1164 * simply add it to IIP. We also need to reset the EI field of the IPSR to zero,
1165 * i.e., the slot to restart into.
1167 * cr.imm contains zero_ext(imm21)
// The shl/shr pair below nets a left shift by 4 with sign extension:
// sign-extend imm21 and scale by 16 (the IA-64 bundle size).
1172 shl r18=r18,43 // put sign bit in position (43=64-21)
1176 shr r18=r18,39 // sign extend (39=43-4)
1179 add r17=r17,r18 // now add the offset
1182 dep r16=0,r16,41,2 // clear EI
1189 END(speculation_vector)
1192 /////////////////////////////////////////////////////////////////////////////////////////
1193 // 0x5800 Entry 28 (size 16 bundles) Reserved
1198 /////////////////////////////////////////////////////////////////////////////////////////
1199 // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
1206 /////////////////////////////////////////////////////////////////////////////////////////
1207 // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
1208 ENTRY(unaligned_access)
// Unaligned Reference vector (0x5a00), fault 57: stash the predicate
// registers in r31 (the dispatcher's saved-pr convention) and hand off to
// the heavyweight unaligned-access dispatch path.
1211 mov r31=pr // prepare to save predicates
1213 br.sptk.many dispatch_unaligned_handler
1214 END(unaligned_access)
1217 /////////////////////////////////////////////////////////////////////////////////////////
1218 // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
1223 /////////////////////////////////////////////////////////////////////////////////////////
1224 // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
1229 /////////////////////////////////////////////////////////////////////////////////////////
1230 // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
1235 /////////////////////////////////////////////////////////////////////////////////////////
1236 // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
1241 /////////////////////////////////////////////////////////////////////////////////////////
1242 // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
1247 /////////////////////////////////////////////////////////////////////////////////////////
1248 // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
1253 /////////////////////////////////////////////////////////////////////////////////////////
1254 // 0x6100 Entry 37 (size 16 bundles) Reserved
1259 /////////////////////////////////////////////////////////////////////////////////////////
1260 // 0x6200 Entry 38 (size 16 bundles) Reserved
1265 /////////////////////////////////////////////////////////////////////////////////////////
1266 // 0x6300 Entry 39 (size 16 bundles) Reserved
1271 /////////////////////////////////////////////////////////////////////////////////////////
1272 // 0x6400 Entry 40 (size 16 bundles) Reserved
1277 /////////////////////////////////////////////////////////////////////////////////////////
1278 // 0x6500 Entry 41 (size 16 bundles) Reserved
1283 /////////////////////////////////////////////////////////////////////////////////////////
1284 // 0x6600 Entry 42 (size 16 bundles) Reserved
1289 /////////////////////////////////////////////////////////////////////////////////////////
1290 // 0x6700 Entry 43 (size 16 bundles) Reserved
1295 /////////////////////////////////////////////////////////////////////////////////////////
1296 // 0x6800 Entry 44 (size 16 bundles) Reserved
1301 /////////////////////////////////////////////////////////////////////////////////////////
1302 // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
1303 ENTRY(ia32_exception)
1309 /////////////////////////////////////////////////////////////////////////////////////////
1310 // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
1311 ENTRY(ia32_intercept)
// IA-32 Intercept vector (0x6a00), faults 30,31,59,70,71. Only compiled in
// with IA-32 support; the END and the "1:" slow-path target are elided from
// this excerpt.
1313 #ifdef CONFIG_IA32_SUPPORT
// NOTE(review): r16 presumably holds cr.isr (loaded in elided lines) —
// ISR.code identifies the intercept cause; confirm against full source.
1317 extr.u r17=r16,16,8 // get ISR.code
1319 mov r19=cr.iim // old eflag value
// p6 compare (elided) routes non-system-flag intercepts to the "1:" path.
1322 (p6) br.cond.spnt 1f // not a system flag fault
// System-flag intercept: only a change of eflags.ac needs handling; the
// compare feeding the next p6 branch is elided from this excerpt.
1325 extr.u r17=r16,18,1 // get the eflags.ac bit
1328 (p6) br.cond.spnt 1f // eflags.ac bit didn't change
1330 mov pr=r31,-1 // restore predicate registers
1334 #endif // CONFIG_IA32_SUPPORT
1339 /////////////////////////////////////////////////////////////////////////////////////////
1340 // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
1341 ENTRY(ia32_interrupt)
// IA-32 Interrupt vector (0x6b00), fault 74: software interrupt raised from
// IA-32 mode, forwarded to the common IA-32 dispatcher when support is
// compiled in. The non-IA32 fallback and END are elided from this excerpt.
1343 #ifdef CONFIG_IA32_SUPPORT
1345 br.sptk.many dispatch_to_ia32_handler
1352 /////////////////////////////////////////////////////////////////////////////////////////
1353 // 0x6c00 Entry 48 (size 16 bundles) Reserved
1358 /////////////////////////////////////////////////////////////////////////////////////////
1359 // 0x6d00 Entry 49 (size 16 bundles) Reserved
1364 /////////////////////////////////////////////////////////////////////////////////////////
1365 // 0x6e00 Entry 50 (size 16 bundles) Reserved
1370 /////////////////////////////////////////////////////////////////////////////////////////
1371 // 0x6f00 Entry 51 (size 16 bundles) Reserved
1376 /////////////////////////////////////////////////////////////////////////////////////////
1377 // 0x7000 Entry 52 (size 16 bundles) Reserved
1382 /////////////////////////////////////////////////////////////////////////////////////////
1383 // 0x7100 Entry 53 (size 16 bundles) Reserved
1388 /////////////////////////////////////////////////////////////////////////////////////////
1389 // 0x7200 Entry 54 (size 16 bundles) Reserved
1394 /////////////////////////////////////////////////////////////////////////////////////////
1395 // 0x7300 Entry 55 (size 16 bundles) Reserved
1400 /////////////////////////////////////////////////////////////////////////////////////////
1401 // 0x7400 Entry 56 (size 16 bundles) Reserved
1406 /////////////////////////////////////////////////////////////////////////////////////////
1407 // 0x7500 Entry 57 (size 16 bundles) Reserved
1412 /////////////////////////////////////////////////////////////////////////////////////////
1413 // 0x7600 Entry 58 (size 16 bundles) Reserved
1418 /////////////////////////////////////////////////////////////////////////////////////////
1419 // 0x7700 Entry 59 (size 16 bundles) Reserved
1424 /////////////////////////////////////////////////////////////////////////////////////////
1425 // 0x7800 Entry 60 (size 16 bundles) Reserved
1430 /////////////////////////////////////////////////////////////////////////////////////////
1431 // 0x7900 Entry 61 (size 16 bundles) Reserved
1436 /////////////////////////////////////////////////////////////////////////////////////////
1437 // 0x7a00 Entry 62 (size 16 bundles) Reserved
1442 /////////////////////////////////////////////////////////////////////////////////////////
1443 // 0x7b00 Entry 63 (size 16 bundles) Reserved
1448 /////////////////////////////////////////////////////////////////////////////////////////
1449 // 0x7c00 Entry 64 (size 16 bundles) Reserved
1454 /////////////////////////////////////////////////////////////////////////////////////////
1455 // 0x7d00 Entry 65 (size 16 bundles) Reserved
1460 /////////////////////////////////////////////////////////////////////////////////////////
1461 // 0x7e00 Entry 66 (size 16 bundles) Reserved
1466 /////////////////////////////////////////////////////////////////////////////////////////
1467 // 0x7f00 Entry 67 (size 16 bundles) Reserved
1471 #ifdef CONFIG_IA32_SUPPORT
1474 * There is no particular reason for this code to be here, other than that
1475 * there happens to be space here that would go unused otherwise. If this
1476 * fault ever gets "unreserved", simply move the following code to a more
1480 // IA32 interrupt entry point
1482 ENTRY(dispatch_to_ia32_handler)
// Entry point for IA-32 software interrupts. Re-enables interruption
// collection, saves minimal/rest state (SAVE_MIN/SAVE_REST presumably in
// elided lines — confirm), then either dispatches an IA-32 system call
// through ia32_syscall_table or reports a bad interrupt.
1486 ssm psr.ic | PSR_DEFAULT_BITS
1488 srlz.i // guarantee that interruption collection is on
1491 adds r3=8,r2 // Base pointer for SAVE_REST
// NOTE(review): r14 presumably holds cr.isr here (load elided); the vector
// number sits in its upper field. r15's expected value (the system-call
// vector) is loaded in elided lines — confirm against full source.
1496 shr r14=r14,16 // Get interrupt number
1498 cmp.ne p6,p0=r14,r15
1499 (p6) br.call.dpnt.many b6=non_ia32_syscall
// --- IA-32 system-call path ---
// pt_regs sits 16 bytes above sp (scratch hole per software conventions).
1501 adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions
1502 adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
1504 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
1505 ld8 r8=[r14] // get r8
1507 st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP)
// Set up a 6-output register frame for the syscall arguments (out0-out5).
1509 alloc r15=ar.pfs,0,0,6,0 // alloc must be first in an insn group
// Gather IA-32 registers from pt_regs into the out slots (IA-32 syscall ABI:
// eax=number, ebx/ecx/edx/esi/edi/ebp = args 1-6).
1511 ld4 r8=[r14],8 // r8 == eax (syscall number)
1512 mov r15=230 // number of entries in ia32 system call table
1514 cmp.ltu.unc p6,p7=r8,r15 // p6 = syscall number in range
1515 ld4 out1=[r14],8 // r9 == ecx
1517 ld4 out2=[r14],8 // r10 == edx
1519 ld4 out0=[r14] // r11 == ebx
1520 adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
1522 ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp
1524 ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi
1525 adds r2=IA64_TASK_PTRACE_OFFSET,r13 // r2 = &current->ptrace
1527 ld4 out4=[r14] // r15 == edi
// Index the handler table (8-byte entries) only when the number is valid;
// otherwise r16 keeps the table base, yielding the ni_syscall entry.
1528 movl r16=ia32_syscall_table
1530 (p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number
1531 ld8 r2=[r2] // r2 = current->ptrace
1534 tbit.z p8,p0=r2,PT_TRACESYS_BIT // (current->ptrace & PT_TRACESYS) == 0?
// NOTE(review): the mov b6=r16 feeding this call is in elided lines.
// Untraced tasks call the handler directly; traced ones take the tracer path.
1537 movl r15=ia32_ret_from_syscall
1540 (p8) br.call.sptk.many b6=b6
1541 br.cond.sptk ia32_trace_syscall
// --- non-syscall path (non_ia32_syscall: label presumably in elided lines) ---
1544 alloc r15=ar.pfs,0,0,2,0
1545 mov out0=r14 // interrupt #
1546 add out1=16,sp // pointer to pt_regs
1547 ;; // avoid WAW on CFM
1548 br.call.sptk.many rp=ia32_bad_interrupt
1549 .ret1: movl r15=ia64_leave_kernel
1553 END(dispatch_to_ia32_handler)
1555 #endif /* CONFIG_IA32_SUPPORT */