/*
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/init.h>
#include <asm/mmu_context.h>
#include <asm/bootinfo.h>

extern void build_tlb_refill_handler(void);

#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
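/*
 * CKSEG0 is a fixed, untranslated window onto low physical memory and
 * never goes through the TLB, and each TLB entry maps an even/odd page
 * pair (hence the PAGE_SHIFT + 1).  Every index above therefore gets a
 * VPN2 that can never match a real translation and never collides with
 * the VPN2 generated for any other index.
 */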
/* Dump the current entry* and pagemask registers */
static inline void dump_cur_tlb_regs(void)
{
	unsigned int entryhihi, entryhilo, entrylo0hi, entrylo0lo, entrylo1hi;
	unsigned int entrylo1lo, pagemask;

	__asm__ __volatile__ (
		".set push             \n"
		".set noreorder        \n"
		".set mips64           \n"
		".set noat             \n"
		"     tlbr             \n"
		"     dmfc0  $1, $10   \n"	/* EntryHi */
		"     dsrl32 %0, $1, 0 \n"
		"     sll    %1, $1, 0 \n"
		"     dmfc0  $1, $2    \n"	/* EntryLo0 */
		"     dsrl32 %2, $1, 0 \n"
		"     sll    %3, $1, 0 \n"
		"     dmfc0  $1, $3    \n"	/* EntryLo1 */
		"     dsrl32 %4, $1, 0 \n"
		"     sll    %5, $1, 0 \n"
		"     mfc0   %6, $5    \n"	/* PageMask */
		".set pop              \n"
		: "=r" (entryhihi), "=r" (entryhilo),
		  "=r" (entrylo0hi), "=r" (entrylo0lo),
		  "=r" (entrylo1hi), "=r" (entrylo1lo),
		  "=r" (pagemask));

	printk("%08X%08X %08X%08X %08X%08X %08X",
	       entryhihi, entryhilo,
	       entrylo0hi, entrylo0lo,
	       entrylo1hi, entrylo1lo,
	       pagemask);
}
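/*
 * Note that dump_cur_tlb_regs() expects its caller to have loaded the
 * Index register first: the tlbr above reads the indexed TLB entry back
 * into EntryHi/EntryLo0/EntryLo1/PageMask.  Those CP0 registers are 64
 * bits wide on the SB1, so the dsrl32/sll pairs split each one into
 * 32-bit halves that plain %08X printk conversions can handle.
 */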
void sb1_dump_tlb(void)
{
	unsigned long old_ctx;
	unsigned long flags;
	int entry;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	printk("Current TLB registers state:\n"
	       "      EntryHi       EntryLo0          EntryLo1     PageMask  Index\n"
	       "--------------------------------------------------------------------\n");
	dump_cur_tlb_regs();
	printk(" %08X\n", read_c0_index());
	printk("\n\nFull TLB Dump:\n"
	       "Idx      EntryHi       EntryLo0          EntryLo1     PageMask\n"
	       "--------------------------------------------------------------\n");
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		printk("\n%02i ", entry);
		dump_cur_tlb_regs();
	}
	printk("\n");
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Start past the wired entries so they survive the flush */
	entry = read_c0_wired();
	while (entry < current_cpu_data.tlbsize) {
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		tlb_write_indexed();
		entry++;
	}
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}
/*
 * Use a bogus region of memory (starting at 0) to sanitize the TLB's.
 * Use increments of the maximum page size (16MB), and check for duplicate
 * entries before doing a given write.  Then, when we're safe from collisions
 * with the firmware, go back and give all the entries invalid addresses with
 * the normal flush routine.  Wired entries will be killed as well!
 */
static void __init sb1_sanitize_tlb(void)
{
	int entry;
	long addr = 0;
	long inc = 1 << 24;	/* 16MB */

	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		/* Skip addresses that some existing entry already matches */
		do {
			addr += inc;
			write_c0_entryhi(addr);
			tlb_probe();
		} while ((int)(read_c0_index()) >= 0);
		write_c0_index(entry);
		tlb_write_indexed();
	}
	/* Now that we know we're safe from collisions, we can safely flush
	   the TLB with the "normal" routine. */
	local_flush_tlb_all();
}
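/*
 * The flush routines below all share one idiom: write a VPN2 into
 * EntryHi, tlb_probe(), and check Index.  A miss sets the P bit, which
 * reads back as a negative Index; on a hit the matching slot is
 * overwritten with a unique unmapped EntryHi and a zeroed (invalid)
 * EntryLo pair.
 */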
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;	/* pages -> even/odd entry pairs */
		local_irq_save(flags);
		if (size <= (current_cpu_data.tlbsize/2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
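/*
 * Both ranged flushes bound the probe loop at half the TLB: each entry
 * covers an even/odd page pair, so once a range spans more pairs than
 * the TLB can hold it is cheaper to invalidate wholesale, either by
 * dropping the mm's context (above) or flushing the whole TLB (below).
 */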
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;

	local_irq_save(flags);
	if (size <= (current_cpu_data.tlbsize/2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			tlb_probe();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			tlb_write_indexed();
		}
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}
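/*
 * Kernel mappings are entered with the global bit set, so the probe
 * above needs no ASID in EntryHi; the register is still saved and
 * restored because it holds the current task's live ASID.
 */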
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		tlb_probe();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx >= 0) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			tlb_write_indexed();
		}
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
	}
}
/*
 * Remove one kernel space TLB entry.  This entry is assumed to be marked
 * global so we don't do the ASID thing.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	tlb_probe();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		tlb_write_indexed();
	}
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);
}
/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}
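/*
 * drop_mmu_context() hands the mm a fresh ASID (or triggers a full
 * flush when the ASID space wraps), so stale entries tagged with the
 * old ASID simply stop matching; no per-entry work is needed.
 */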
/* Stolen from mips32 routines */

void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for the debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	tlb_probe();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	write_c0_entrylo0(ptep->pte_high);
	ptep++;
	write_c0_entrylo1(ptep->pte_high);
#else
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	local_irq_restore(flags);
}
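/*
 * The >> 6 above converts a software PTE into EntryLo format: the low
 * six bits of the PTE hold software-only flags, and what remains lines
 * up with EntryLo's global/valid/dirty/cache bits followed by the PFN
 * starting at bit 6.
 */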
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);

	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	tlb_write_indexed();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);

	local_flush_tlb_all();
	local_irq_restore(flags);
}
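/*
 * A hypothetical usage sketch (the addresses, attribute value and the
 * PM_4M pagemask constant below are illustrative assumptions, not taken
 * from this file): wire an 8MB uncached window at physical 0x10000000
 * using one even/odd pair of 4MB pages.  0x17 in EntryLo is G|V|D plus
 * the uncached cache attribute, with the PFN starting at bit 6:
 *
 *	add_wired_entry(((0x10000000UL >> 12) << 6) | 0x17,
 *			((0x10400000UL >> 12) << 6) | 0x17,
 *			0xffffffffc0000000UL, PM_4M);
 */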
/*
 * This is called from loadmmu.c.  We have to set up all the
 * memory management function pointers, as well as initialize
 * the caches and tlbs
 */
void tlb_init(void)
{
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);

	/*
	 * We don't know what state the firmware left the TLB's in, so this is
	 * the ultra-conservative way to flush the TLB's and avoid machine
	 * check exceptions due to duplicate TLB entries
	 */
	sb1_sanitize_tlb();

	build_tlb_refill_handler();
}