2 * TLB exception handling code for MIPS32 CPUs.
4 * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse
6 * Multi-cpu abstraction and reworking:
7 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
9 * Carsten Langgaard, carstenl@mips.com
10 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
12 * Pete Popov, ppopov@pacbell.net
13 * Added 36 bit phys address support.
14 * Copyright (C) 2002 MontaVista Software, Inc.
16 #include <linux/init.h>
19 #include <asm/current.h>
20 #include <asm/offset.h>
21 #include <asm/cachectl.h>
22 #include <asm/fpregdef.h>
23 #include <asm/mipsregs.h>
25 #include <asm/pgtable.h>
26 #include <asm/processor.h>
27 #include <asm/regdef.h>
28 #include <asm/stackframe.h>
30 #define TLB_OPTIMIZE /* If you are paranoid, disable this. */
32 #ifdef CONFIG_64BIT_PHYS_ADDR
34 /* We really only support 36 bit physical addresses on MIPS32 */
39 #define PTE_HALF 4 /* pte_high contains pre-shifted, ready to go entry */
/* 8-byte ptes (pte_low/pte_high): the pair/entry index masks keep one
 * fewer low bit than the plain 32-bit configuration further down. */
41 #define PTEP_INDX_MSK 0xff0
42 #define PTE_INDX_MSK 0xff8
43 #define PTE_INDX_SHIFT 9
/* pte_high is pre-shifted (see PTE_HALF above), so no conversion needed. */
44 #define CONVERT_PTE(pte)
45 #define PTE_MAKEWRITE_HIGH(pte, ptr) \
47 ori pte, (_PAGE_VALID | _PAGE_DIRTY); \
51 #define PTE_MAKEVALID_HIGH(pte, ptr) \
53 ori pte, pte, _PAGE_VALID; \
65 #define PTEP_INDX_MSK 0xff8
66 #define PTE_INDX_MSK 0xffc
67 #define PTE_INDX_SHIFT 10
/* Plain 32-bit ptes: shift the software status bits out so the pte
 * becomes a raw EntryLo value; the *_HIGH helpers are no-ops here. */
68 #define CONVERT_PTE(pte) srl pte, pte, 6
69 #define PTE_MAKEWRITE_HIGH(pte, ptr)
70 #define PTE_MAKEVALID_HIGH(pte, ptr)
72 #endif /* CONFIG_64BIT_PHYS_ADDR */
76 #ifdef CONFIG_64BIT_PHYS_ADDR
/* 36-bit PA: the Context-derived offset already matches the 8-byte ptes. */
77 #define GET_PTE_OFF(reg)
/* 4-byte ptes: halve the Context-derived offset.
 * NOTE(review): the #else separating the two variants is not visible in
 * this chunk -- confirm against the full file. */
79 #define GET_PTE_OFF(reg) srl reg, reg, 1
83 * These handlers must be written in a relocatable manner
84 * because based upon the cpu type an arbitrary one of the
85 * following pieces of code will be copied to the KSEG0
88 /* TLB refill, EXL == 0, MIPS32 version */
/* Fast TLB-refill vector: walk the two-level page table using only
 * k0/k1 and write the even/odd pte pair with tlbwr (random entry). */
91 LEAF(except_vec0_r4000)
97 sll k1, 2 # log2(sizeof(pgd_t))
101 lw k1, pgd_current # get pgd pointer
104 mfc0 k0, CP0_BADVADDR # Get faulting address
105 srl k0, k0, _PGDIR_SHIFT # get pgd only bits
108 addu k1, k1, k0 # add in pgd offset
109 mfc0 k0, CP0_CONTEXT # get context reg
111 GET_PTE_OFF(k0) # get pte offset
112 and k0, k0, PTEP_INDX_MSK # mask down to the even/odd pte-pair index
113 addu k1, k1, k0 # add in offset
115 PTE_L k0, PTE_HALF(k1) # get even pte
117 P_MTC0 k0, CP0_ENTRYLO0 # load it
118 PTE_L k1, (PTE_HALF+PTE_SIZE)(k1) # get odd pte
120 P_MTC0 k1, CP0_ENTRYLO1 # load it
122 tlbwr # write random tlb entry
125 eret # return from trap
126 END(except_vec0_r4000)
129 * These are here to avoid putting ifdefs in tlb-r4k.c
/* Placeholder vector: panics if it is ever installed and taken. */
133 LEAF(except_vec0_nevada)
135 PANIC("Nevada Exception Vec 0 called")
136 END(except_vec0_nevada)
/* Placeholder vector: panics if it is ever installed and taken. */
140 LEAF(except_vec0_r4600)
142 PANIC("R4600 Exception Vec 0 called")
143 END(except_vec0_r4600)
148 * ABUSE of CPP macros 101.
150 * After this macro runs, the pte faulted on is
151 * in register PTE, a ptr into the table in which
152 * the pte belongs is in PTR.
156 #define GET_PGD(scratch, ptr) \
157 mfc0 ptr, CP0_CONTEXT; /* Context indexes the pgd_current table */ \
158 la scratch, pgd_current;\
161 addu ptr, scratch, ptr; /* ptr = &pgd_current[...] */ \
164 #define GET_PGD(scratch, ptr) /* alternate-config variant; body lines not visible in this chunk */ \
168 #define LOAD_PTE(pte, ptr) \
170 mfc0 pte, CP0_BADVADDR; /* first pass: pgd slot from the faulting VA */ \
171 srl pte, pte, _PGDIR_SHIFT; \
173 addu ptr, ptr, pte; \
174 mfc0 pte, CP0_BADVADDR; /* second pass: pte index within the table */ \
176 srl pte, pte, PTE_INDX_SHIFT; \
177 and pte, pte, PTE_INDX_MSK; \
178 addu ptr, ptr, pte; \
181 /* This places the even/odd pte pair in the page
182 * table at PTR into ENTRYLO0 and ENTRYLO1 using
183 * TMP as a scratch register.
185 #define PTE_RELOAD(ptr, tmp) \
186 ori ptr, ptr, PTE_SIZE; /* ori+xori: clear the PTE_SIZE bit, */ \
187 xori ptr, ptr, PTE_SIZE; /* i.e. round down to the even pte */ \
188 PTE_L tmp, (PTE_HALF+PTE_SIZE)(ptr); /* odd pte */ \
190 P_MTC0 tmp, CP0_ENTRYLO1; \
191 PTE_L ptr, PTE_HALF(ptr); /* even pte */ \
193 P_MTC0 ptr, CP0_ENTRYLO0;
/* Hand the fault to the generic C page-fault path; a2 carries the
 * faulting address. NOTE(review): several macro lines are not visible
 * in this chunk -- confirm the full body against the original file. */
195 #define DO_FAULT(write) \
198 mfc0 a2, CP0_BADVADDR; \
204 j ret_from_exception; \
208 /* Check if PTE is present, if not then jump to LABEL.
209 * PTR points to the page table where this PTE is located,
210 * when the macro is done executing PTE will be restored
211 * with its original value.
213 #define PTE_PRESENT(pte, ptr, label) \
214 andi pte, pte, (_PAGE_PRESENT | _PAGE_READ); /* isolate present+read */ \
215 xori pte, pte, (_PAGE_PRESENT | _PAGE_READ); /* zero iff both were set */ \
219 /* Make PTE valid, store result in PTR. */
220 #define PTE_MAKEVALID(pte, ptr) \
221 ori pte, pte, (_PAGE_VALID | _PAGE_ACCESSED); \
224 /* Check if PTE can be written to, if not branch to LABEL.
225 * Regardless restore PTE with value from PTR when done.
227 #define PTE_WRITABLE(pte, ptr, label) \
228 andi pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); /* isolate present+write */ \
229 xori pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); /* zero iff both were set */ \
233 /* Make PTE writable, update software status bits as well,
236 #define PTE_MAKEWRITE(pte, ptr) \
237 ori pte, pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | \
238 _PAGE_VALID | _PAGE_DIRTY); \
/* Single-nop CP0 hazard filler -- presumably for R5000-family cores;
 * NOTE(review): confirm placement against the cpu's hazard rules. */
243 #define R5K_HAZARD nop
/* TLB load-miss handler: if the pte is present+readable, mark it
 * valid/accessed; otherwise PTE_PRESENT branches to nopage_tlbl
 * (label not visible in this chunk). */
246 NESTED(handle_tlbl, PT_SIZE, sp)
250 /* Test present bit in entry. */
254 PTE_PRESENT(k0, k1, nopage_tlbl)
255 PTE_MAKEVALID_HIGH(k0, k1)
256 PTE_MAKEVALID(k0, k1)
/* TLB store-miss handler: probe for the faulting entry, and if the pte
 * is present+writable set its accessed/modified/valid/dirty bits;
 * otherwise PTE_WRITABLE branches to nopage_tlbs (label not visible
 * in this chunk). */
273 NESTED(handle_tlbs, PT_SIZE, sp)
280 tlbp # find faulting entry
281 PTE_WRITABLE(k0, k1, nopage_tlbs)
282 PTE_MAKEWRITE(k0, k1)
283 PTE_MAKEWRITE_HIGH(k0, k1)
300 NESTED(handle_mod, PT_SIZE, sp)
306 tlbp # find faulting entry
307 andi k0, k0, _PAGE_WRITE
311 /* Present and writable bits set, set accessed and dirty bits. */
312 PTE_MAKEWRITE(k0, k1)
313 PTE_MAKEWRITE_HIGH(k0, k1)
314 /* Now reload the entry into the tlb. */