www.usr.com/support/gpl/USR9113_release1.0.tar.gz
kernel/linux/arch/mips/mm/c-r4k.c
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
7  * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
8  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9  */
10 #include <linux/config.h>
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
14 #include <linux/mm.h>
15 #include <linux/bitops.h>
16
17 #include <asm/bcache.h>
18 #include <asm/bootinfo.h>
19 #include <asm/cacheops.h>
20 #include <asm/cpu.h>
21 #include <asm/cpu-features.h>
22 #include <asm/io.h>
23 #include <asm/page.h>
24 #include <asm/pgtable.h>
25 #include <asm/r4kcache.h>
26 #include <asm/system.h>
27 #include <asm/mmu_context.h>
28 #include <asm/war.h>
29
30 static unsigned long icache_size, dcache_size, scache_size;
31
32 /*
33  * Dummy cache handling routines for machines without board caches
34  */
35 static void no_sc_noop(void) {}
36
37 static struct bcache_ops no_sc_ops = {
38         .bc_enable = (void *)no_sc_noop,
39         .bc_disable = (void *)no_sc_noop,
40         .bc_wback_inv = (void *)no_sc_noop,
41         .bc_inv = (void *)no_sc_noop
42 };
43
44 struct bcache_ops *bcops = &no_sc_ops;
45
46 #define cpu_is_r4600_v1_x()     ((read_c0_prid() & 0xfffffff0) == 0x2010)
47 #define cpu_is_r4600_v2_x()     ((read_c0_prid() & 0xfffffff0) == 0x2020)
48
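/*
 * Workaround preamble for Hit-type cacheops on R4600 cores: V2.x parts
 * get an uncached KSEG1 load first (presumably to drain the write
 * buffer), V1.x parts get a few nops before the cacheop.
 */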
49 #define R4600_HIT_CACHEOP_WAR_IMPL                                      \
50 do {                                                                    \
51         if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())            \
52                 *(volatile unsigned long *)KSEG1;                       \
53         if (R4600_V1_HIT_CACHEOP_WAR)                                   \
54                 __asm__ __volatile__("nop;nop;nop;nop");                \
55 } while (0)
56
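/*
 * The r4k_blast_* symbols below are function pointers; the *_setup()
 * routines bind each of them at init time to an unrolled flush routine
 * that matches the line size probed for that cache.
 */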
57 static void (*r4k_blast_dcache_page)(unsigned long addr);
58
59 static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
60 {
61         R4600_HIT_CACHEOP_WAR_IMPL;
62         blast_dcache32_page(addr);
63 }
64
65 static inline void r4k_blast_dcache_page_setup(void)
66 {
67         unsigned long  dc_lsize = cpu_dcache_line_size();
68
69         if (dc_lsize == 16)
70                 r4k_blast_dcache_page = blast_dcache16_page;
71         else if (dc_lsize == 32)
72                 r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
73 }
74
75 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
76
77 static inline void r4k_blast_dcache_page_indexed_setup(void)
78 {
79         unsigned long dc_lsize = cpu_dcache_line_size();
80
81         if (dc_lsize == 16)
82                 r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
83         else if (dc_lsize == 32)
84                 r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
85 }
86
87 static void (* r4k_blast_dcache)(void);
88
89 static inline void r4k_blast_dcache_setup(void)
90 {
91         unsigned long dc_lsize = cpu_dcache_line_size();
92
93         if (dc_lsize == 16)
94                 r4k_blast_dcache = blast_dcache16;
95         else if (dc_lsize == 32)
96                 r4k_blast_dcache = blast_dcache32;
97 }
98
99 /* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
100 #define JUMP_TO_ALIGN(order) \
101         __asm__ __volatile__( \
102                 "b\t1f\n\t" \
103                 ".align\t" #order "\n\t" \
104                 "1:\n\t" \
105                 )
106 #define CACHE32_UNROLL32_ALIGN  JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
107 #define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
108
109 static inline void blast_r4600_v1_icache32(void)
110 {
111         unsigned long flags;
112
113         local_irq_save(flags);
114         blast_icache32();
115         local_irq_restore(flags);
116 }
117
118 static inline void tx49_blast_icache32(void)
119 {
120         unsigned long start = INDEX_BASE;
121         unsigned long end = start + current_cpu_data.icache.waysize;
122         unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
123         unsigned long ws_end = current_cpu_data.icache.ways <<
124                                current_cpu_data.icache.waybit;
125         unsigned long ws, addr;
126
127         CACHE32_UNROLL32_ALIGN2;
128         /* I'm in even chunk.  blast odd chunks */
129         for (ws = 0; ws < ws_end; ws += ws_inc) 
130                 for (addr = start + 0x400; addr < end; addr += 0x400 * 2) 
131                         cache32_unroll32(addr|ws,Index_Invalidate_I);
132         CACHE32_UNROLL32_ALIGN;
133         /* I'm in odd chunk.  blast even chunks */
134         for (ws = 0; ws < ws_end; ws += ws_inc) 
135                 for (addr = start; addr < end; addr += 0x400 * 2) 
136                         cache32_unroll32(addr|ws,Index_Invalidate_I);
137 }
138
139 static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
140 {
141         unsigned long flags;
142
143         local_irq_save(flags);
144         blast_icache32_page_indexed(page);
145         local_irq_restore(flags);
146 }
147
148 static inline void tx49_blast_icache32_page_indexed(unsigned long page)
149 {
150         unsigned long start = page;
151         unsigned long end = start + PAGE_SIZE;
152         unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
153         unsigned long ws_end = current_cpu_data.icache.ways <<
154                                current_cpu_data.icache.waybit;
155         unsigned long ws, addr;
156
157         CACHE32_UNROLL32_ALIGN2;
158         /* I'm in even chunk.  blast odd chunks */
159         for (ws = 0; ws < ws_end; ws += ws_inc) 
160                 for (addr = start + 0x400; addr < end; addr += 0x400 * 2) 
161                         cache32_unroll32(addr|ws,Index_Invalidate_I);
162         CACHE32_UNROLL32_ALIGN;
163         /* I'm in odd chunk.  blast even chunks */
164         for (ws = 0; ws < ws_end; ws += ws_inc) 
165                 for (addr = start; addr < end; addr += 0x400 * 2) 
166                         cache32_unroll32(addr|ws,Index_Invalidate_I);
167 }
168
169 static void (* r4k_blast_icache_page)(unsigned long addr);
170
171 static inline void r4k_blast_icache_page_setup(void)
172 {
173         unsigned long ic_lsize = cpu_icache_line_size();
174
175         if (ic_lsize == 16)
176                 r4k_blast_icache_page = blast_icache16_page;
177         else if (ic_lsize == 32)
178                 r4k_blast_icache_page = blast_icache32_page;
179         else if (ic_lsize == 64)
180                 r4k_blast_icache_page = blast_icache64_page;
181 }
182
183
184 static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
185
186 static inline void r4k_blast_icache_page_indexed_setup(void)
187 {
188         unsigned long ic_lsize = cpu_icache_line_size();
189
190         if (ic_lsize == 16)
191                 r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
192         else if (ic_lsize == 32) {
193                 if (TX49XX_ICACHE_INDEX_INV_WAR)
194                         r4k_blast_icache_page_indexed =
195                                 tx49_blast_icache32_page_indexed;
196                 else if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
197                         r4k_blast_icache_page_indexed =
198                                 blast_icache32_r4600_v1_page_indexed;
199                 else
200                         r4k_blast_icache_page_indexed =
201                                 blast_icache32_page_indexed;
202         } else if (ic_lsize == 64)
203                 r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
204 }
205
206 static void (* r4k_blast_icache)(void);
207
208 static inline void r4k_blast_icache_setup(void)
209 {
210         unsigned long ic_lsize = cpu_icache_line_size();
211
212         if (ic_lsize == 16)
213                 r4k_blast_icache = blast_icache16;
214         else if (ic_lsize == 32) {
215                 if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
216                         r4k_blast_icache = blast_r4600_v1_icache32;
217                 else if (TX49XX_ICACHE_INDEX_INV_WAR)
218                         r4k_blast_icache = tx49_blast_icache32;
219                 else
220                         r4k_blast_icache = blast_icache32;
221         } else if (ic_lsize == 64)
222                 r4k_blast_icache = blast_icache64;
223 }
224
225 static void (* r4k_blast_scache_page)(unsigned long addr);
226
227 static inline void r4k_blast_scache_page_setup(void)
228 {
229         unsigned long sc_lsize = cpu_scache_line_size();
230
231         if (sc_lsize == 16)
232                 r4k_blast_scache_page = blast_scache16_page;
233         else if (sc_lsize == 32)
234                 r4k_blast_scache_page = blast_scache32_page;
235         else if (sc_lsize == 64)
236                 r4k_blast_scache_page = blast_scache64_page;
237         else if (sc_lsize == 128)
238                 r4k_blast_scache_page = blast_scache128_page;
239 }
240
241 static void (* r4k_blast_scache)(void);
242
243 static inline void r4k_blast_scache_setup(void)
244 {
245         unsigned long sc_lsize = cpu_scache_line_size();
246
247         if (sc_lsize == 16)
248                 r4k_blast_scache = blast_scache16;
249         else if (sc_lsize == 32)
250                 r4k_blast_scache = blast_scache32;
251         else if (sc_lsize == 64)
252                 r4k_blast_scache = blast_scache64;
253         else if (sc_lsize == 128)
254                 r4k_blast_scache = blast_scache128;
255 }
256
257 static void r4k_flush_cache_all(void)
258 {
259         if (!cpu_has_dc_aliases)
260                 return;
261
262         r4k_blast_dcache();
263         r4k_blast_icache();
264 }
265
266 static void r4k___flush_cache_all(void)
267 {
268         r4k_blast_dcache();
269         r4k_blast_icache();
270
271         switch (current_cpu_data.cputype) {
272         case CPU_R4000SC:
273         case CPU_R4000MC:
274         case CPU_R4400SC:
275         case CPU_R4400MC:
276         case CPU_R10000:
277         case CPU_R12000:
278                 r4k_blast_scache();
279         }
280 }
281
282 static void r4k_flush_cache_range(struct vm_area_struct *vma,
283         unsigned long start, unsigned long end)
284 {
285         int exec;
286
287         if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
288                 return;
289
290         exec = vma->vm_flags & VM_EXEC;
291         if (cpu_has_dc_aliases || exec)
292                 r4k_blast_dcache();
293         if (exec)
294                 r4k_blast_icache();
295 }
296
297 static void r4k_flush_cache_mm(struct mm_struct *mm)
298 {
299         if (!cpu_has_dc_aliases)
300                 return;
301
302         if (!cpu_context(smp_processor_id(), mm))
303                 return;
304
305         r4k_blast_dcache();
306         r4k_blast_icache();
307
308         /*
309          * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
310          * only flush the primary caches but R10000 and R12000 behave sanely ...
311          */
312         if (current_cpu_data.cputype == CPU_R4000SC ||
313             current_cpu_data.cputype == CPU_R4000MC ||
314             current_cpu_data.cputype == CPU_R4400SC ||
315             current_cpu_data.cputype == CPU_R4400MC)
316                 r4k_blast_scache();
317 }
318
319 static void r4k_flush_cache_page(struct vm_area_struct *vma,
320                                         unsigned long page)
321 {
322         int exec = vma->vm_flags & VM_EXEC;
323         struct mm_struct *mm = vma->vm_mm;
324         pgd_t *pgdp;
325         pmd_t *pmdp;
326         pte_t *ptep;
327
328         /*
329          * If the mm owns no valid ASID yet, it cannot possibly have gotten
330          * this page into the cache.
331          */
332         if (cpu_context(smp_processor_id(), mm) == 0)
333                 return;
334
335         page &= PAGE_MASK;
336         pgdp = pgd_offset(mm, page);
337         pmdp = pmd_offset(pgdp, page);
338         ptep = pte_offset(pmdp, page);
339
340         /*
341          * If the page isn't marked valid, the page cannot possibly be
342          * in the cache.
343          */
344         if (!(pte_val(*ptep) & _PAGE_PRESENT))
345                 return;
346
347         /*
348          * Doing flushes for another ASID than the current one is
349          * too difficult since stupid R4k caches do a TLB translation
350          * for every cache flush operation.  So we do indexed flushes
351          * in that case, which doesn't flush the cache much more than necessary.
352          */
353         if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
354                 if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
355                         r4k_blast_dcache_page(page);
356                 if (exec)
357                         r4k_blast_icache_page(page);
358
359                 return;
360         }
361
362         /*
363          * Do indexed flush, too much work to get the (possible) TLB refills
364          * to work correctly.
365          */
366         page = INDEX_BASE + (page & (dcache_size - 1));
367         if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
368                 r4k_blast_dcache_page_indexed(page);
369         if (exec) {
370                 if (cpu_has_vtag_icache) {
371                         int cpu = smp_processor_id();
372
373                         if (cpu_context(cpu, vma->vm_mm) != 0)
374                                 drop_mmu_context(vma->vm_mm, cpu);
375                 } else
376                         r4k_blast_icache_page_indexed(page);
377         }
378 }
379
380 static void r4k_flush_data_cache_page(unsigned long addr)
381 {
382         r4k_blast_dcache_page(addr);
383 }
384
385 static void r4k_flush_icache_range(unsigned long start, unsigned long end)
386 {
387         unsigned long dc_lsize = current_cpu_data.dcache.linesz;
388         unsigned long ic_lsize = current_cpu_data.icache.linesz;
389         unsigned long addr, aend;
390
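        /*
         * Unless the I-cache fills straight from the D-cache, newly
         * written instructions must first be written back out of the
         * D-cache so the I-cache refill will see them.
         */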
391         if (!cpu_has_ic_fills_f_dc) {
392                 if (end - start > dcache_size)
393                         r4k_blast_dcache();
394                 else {
395                         addr = start & ~(dc_lsize - 1);
396                         aend = (end - 1) & ~(dc_lsize - 1);
397
398                         while (1) {
399                                 /* Hit_Writeback_Inv_D */
400                                 protected_writeback_dcache_line(addr);
401                                 if (addr == aend)
402                                         break;
403                                 addr += dc_lsize;
404                         }
405                 }
406         }
407
408         if (end - start > icache_size)
409                 r4k_blast_icache();
410         else {
411                 addr = start & ~(ic_lsize - 1);
412                 aend = (end - 1) & ~(ic_lsize - 1);
413                 while (1) {
414                         /* Hit_Invalidate_I */
415                         protected_flush_icache_line(addr);
416                         if (addr == aend)
417                                 break;
418                         addr += ic_lsize;
419                 }
420         }
421 }
422
423 /*
424  * Ok, this seriously sucks.  We use this to flush a user page but don't
425  * know the virtual address, so we have to blast away the whole icache
426  * which is significantly more expensive than the real thing.  Otoh we at
427  * least know the kernel address of the page so we can flush it
428  * selectively.
429  */
430 static void r4k_flush_icache_page(struct vm_area_struct *vma, struct page *page)
431 {
432         /*
433          * If the page isn't marked executable, no icache flush is
434          * needed.
435          */
436         if (!(vma->vm_flags & VM_EXEC))
437                 return;
438
439         /*
440          * Tricky ...  Because we don't know the virtual address we've got the
441          * choice of either invalidating the entire primary and secondary
442          * caches or just the secondary cache.  With the subset
443          * enforcement on R4000SC, R4400SC, R10000 and R12000, invalidating the
444          * secondary cache will result in any entries in the primary caches
445          * also getting invalidated, which hopefully is a bit more economical.
446          */
447         if (cpu_has_subset_pcaches) {
448                 unsigned long addr = (unsigned long) page_address(page);
449
450                 r4k_blast_scache_page(addr);
451                 ClearPageDcacheDirty(page);
452
453                 return;
454         }
455
456         if (!cpu_has_ic_fills_f_dc) {
457                 unsigned long addr = (unsigned long) page_address(page);
458                 r4k_blast_dcache_page(addr);
459                 ClearPageDcacheDirty(page);
460         }
461
462         /*
463          * We're not sure of the virtual address(es) involved here, so
464          * we have to flush the entire I-cache.
465          */
466         if (cpu_has_vtag_icache) {
467                 int cpu = smp_processor_id();
468
469                 if (cpu_context(cpu, vma->vm_mm) != 0)
470                         drop_mmu_context(vma->vm_mm, cpu);
471         } else
472                 r4k_blast_icache();
473 }
474
475 #ifdef CONFIG_DMA_NONCOHERENT
476
477 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
478 {
479         unsigned long end, a;
480
481         /* Catch bad driver code */
482         BUG_ON(size == 0);
483
484         if (cpu_has_subset_pcaches) {
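                /*
                 * The primary caches are kept a subset of the S-cache
                 * here, so a secondary writeback-invalidate also covers
                 * the primaries.
                 */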
485                 unsigned long sc_lsize = current_cpu_data.scache.linesz;
486
487                 if (size >= scache_size) {
488                         r4k_blast_scache();
489                         return;
490                 }
491
492                 a = addr & ~(sc_lsize - 1);
493                 end = (addr + size - 1) & ~(sc_lsize - 1);
494                 while (1) {
495                         flush_scache_line(a);   /* Hit_Writeback_Inv_SD */
496                         if (a == end)
497                                 break;
498                         a += sc_lsize;
499                 }
500                 return;
501         }
502
503         /*
504          * Either no secondary cache or the available caches don't have the
505          * subset property so we have to flush the primary caches
506          * explicitly
507          */
508         if (size >= dcache_size) {
509                 r4k_blast_dcache();
510         } else {
511                 unsigned long dc_lsize = current_cpu_data.dcache.linesz;
512
513                 R4600_HIT_CACHEOP_WAR_IMPL;
514                 a = addr & ~(dc_lsize - 1);
515                 end = (addr + size - 1) & ~(dc_lsize - 1);
516                 while (1) {
517                         flush_dcache_line(a);   /* Hit_Writeback_Inv_D */
518                         if (a == end)
519                                 break;
520                         a += dc_lsize;
521                 }
522         }
523
524         bc_wback_inv(addr, size);
525 }
526
527 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
528 {
529         unsigned long end, a;
530
531         /* Catch bad driver code */
532         BUG_ON(size == 0);
533
534         if (cpu_has_subset_pcaches) {
535                 unsigned long sc_lsize = current_cpu_data.scache.linesz;
536
537                 if (size >= scache_size) {
538                         r4k_blast_scache();
539                         return;
540                 }
541
542                 a = addr & ~(sc_lsize - 1);
543                 end = (addr + size - 1) & ~(sc_lsize - 1);
544                 while (1) {
545                         flush_scache_line(a);   /* Hit_Writeback_Inv_SD */
546                         if (a == end)
547                                 break;
548                         a += sc_lsize;
549                 }
550                 return;
551         }
552
553         if (size >= dcache_size) {
554                 r4k_blast_dcache();
555         } else {
556                 unsigned long dc_lsize = current_cpu_data.dcache.linesz;
557
558                 R4600_HIT_CACHEOP_WAR_IMPL;
559                 a = addr & ~(dc_lsize - 1);
560                 end = (addr + size - 1) & ~(dc_lsize - 1);
561                 while (1) {
562                         flush_dcache_line(a);   /* Hit_Writeback_Inv_D */
563                         if (a == end)
564                                 break;
565                         a += dc_lsize;
566                 }
567         }
568
569         bc_inv(addr, size);
570 }
571 #endif /* CONFIG_DMA_NONCOHERENT */
572
573 /*
574  * While we're protected against bad userland addresses we don't care
575  * very much about what happens in that case.  Usually a segmentation
576  * fault will dump the process later on anyway ...
577  */
578 static void r4k_flush_cache_sigtramp(unsigned long addr)
579 {
580         unsigned long ic_lsize = current_cpu_data.icache.linesz;
581         unsigned long dc_lsize = current_cpu_data.dcache.linesz;
582
583         R4600_HIT_CACHEOP_WAR_IMPL;
584         protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
585         protected_flush_icache_line(addr & ~(ic_lsize - 1));
586         if (MIPS4K_ICACHE_REFILL_WAR) {
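                /*
                 * Hit-invalidate the I-cache line holding the code right
                 * after this sequence (label 1 below); this is the
                 * workaround the MIPS4K_ICACHE_REFILL_WAR macro calls for.
                 */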
587                 __asm__ __volatile__ (
588                         ".set push\n\t"
589                         ".set noat\n\t"
590                         ".set mips3\n\t"
591 #ifdef CONFIG_MIPS32
592                         "la     $at,1f\n\t"
593 #endif
594 #ifdef CONFIG_MIPS64
595                         "dla    $at,1f\n\t"
596 #endif
597                         "cache  %0,($at)\n\t"
598                         "nop; nop; nop\n"
599                         "1:\n\t"
600                         ".set pop"
601                         :
602                         : "i" (Hit_Invalidate_I));
603         }
604         if (MIPS_CACHE_SYNC_WAR)
605                 __asm__ __volatile__ ("sync");
606 }
607
608 static void r4k_flush_icache_all(void)
609 {
610         if (cpu_has_vtag_icache)
611                 r4k_blast_icache();
612 }
613
614 static inline void rm7k_erratum31(void)
615 {
616         const unsigned long ic_lsize = 32;
617         unsigned long addr;
618
619         /* RM7000 erratum #31. The icache is screwed at startup. */
620         write_c0_taglo(0);
621         write_c0_taghi(0);
622
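        /*
         * For each index in the first 4kB: store a zero tag, Fill the
         * line, then store a zero tag again, at each of the four 4kB
         * offsets (presumably one per I-cache bank/way).
         */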
623         for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
624                 __asm__ __volatile__ (
625                         ".set noreorder\n\t"
626                         ".set mips3\n\t"
627                         "cache\t%1, 0(%0)\n\t"
628                         "cache\t%1, 0x1000(%0)\n\t"
629                         "cache\t%1, 0x2000(%0)\n\t"
630                         "cache\t%1, 0x3000(%0)\n\t"
631                         "cache\t%2, 0(%0)\n\t"
632                         "cache\t%2, 0x1000(%0)\n\t"
633                         "cache\t%2, 0x2000(%0)\n\t"
634                         "cache\t%2, 0x3000(%0)\n\t"
635                         "cache\t%1, 0(%0)\n\t"
636                         "cache\t%1, 0x1000(%0)\n\t"
637                         "cache\t%1, 0x2000(%0)\n\t"
638                         "cache\t%1, 0x3000(%0)\n\t"
639                         ".set\tmips0\n\t"
640                         ".set\treorder\n\t"
641                         :
642                         : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
643         }
644 }
645
646 static char *way_string[] = { NULL, "direct mapped", "2-way", "3-way", "4-way",
647         "5-way", "6-way", "7-way", "8-way"
648 };
649
650 static void __init probe_pcache(void)
651 {
652         struct cpuinfo_mips *c = &current_cpu_data;
653         unsigned int config = read_c0_config();
654         unsigned int prid = read_c0_prid();
655         unsigned long config1;
656         unsigned int lsize;
657
658         switch (c->cputype) {
659         case CPU_R4600:                 /* QED style two way caches? */
660         case CPU_R4700:
661         case CPU_R5000:
662         case CPU_NEVADA:
663                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
664                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
665                 c->icache.ways = 2;
666                 c->icache.waybit = ffs(icache_size/2) - 1;
667
668                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
669                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
670                 c->dcache.ways = 2;
671                 c->dcache.waybit= ffs(dcache_size/2) - 1;
672
673                 c->options |= MIPS_CPU_CACHE_CDEX_P;
674                 break;
675
676         case CPU_R5432:
677         case CPU_R5500:
678                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
679                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
680                 c->icache.ways = 2;
681                 c->icache.waybit= 0;
682
683                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
684                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
685                 c->dcache.ways = 2;
686                 c->dcache.waybit = 0;
687
688                 c->options |= MIPS_CPU_CACHE_CDEX_P;
689                 break;
690
691         case CPU_TX49XX:
692                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
693                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
694                 c->icache.ways = 4;
695                 c->icache.waybit= 0;
696
697                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
698                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
699                 c->dcache.ways = 4;
700                 c->dcache.waybit = 0;
701
702                 c->options |= MIPS_CPU_CACHE_CDEX_P;
703                 break;
704
705         case CPU_R4000PC:
706         case CPU_R4000SC:
707         case CPU_R4000MC:
708         case CPU_R4400PC:
709         case CPU_R4400SC:
710         case CPU_R4400MC:
711         case CPU_R4300:
712                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
713                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
714                 c->icache.ways = 1;
715                 c->icache.waybit = 0;   /* doesn't matter */
716
717                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
718                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
719                 c->dcache.ways = 1;
720                 c->dcache.waybit = 0;   /* does not matter */
721
722                 c->options |= MIPS_CPU_CACHE_CDEX_P;
723                 break;
724
725         case CPU_R10000:
726         case CPU_R12000:
727                 icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
728                 c->icache.linesz = 64;
729                 c->icache.ways = 2;
730                 c->icache.waybit = 0;
731
732                 dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
733                 c->dcache.linesz = 32;
734                 c->dcache.ways = 2;
735                 c->dcache.waybit = 0;
736
737                 c->options |= MIPS_CPU_PREFETCH;
738                 break;
739
740         case CPU_VR4133:
741                 write_c0_config(config & ~CONF_EB);
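                /* fall through */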
742         case CPU_VR4131:
743                 /* Workaround for cache instruction bug of VR4131 */
744                 if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
745                     c->processor_id == 0x0c82U) {
746                         config &= ~0x00000030U;
747                         config |= 0x00410000U;
748                         write_c0_config(config);
749                 }
750                 icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
751                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
752                 c->icache.ways = 2;
753                 c->icache.waybit = ffs(icache_size/2) - 1;
754
755                 dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
756                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
757                 c->dcache.ways = 2;
758                 c->dcache.waybit = ffs(dcache_size/2) - 1;
759
760                 c->options |= MIPS_CPU_CACHE_CDEX_P;
761                 break;
762
763         case CPU_VR41XX:
764         case CPU_VR4111:
765         case CPU_VR4121:
766         case CPU_VR4122:
767         case CPU_VR4181:
768         case CPU_VR4181A:
769                 icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
770                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
771                 c->icache.ways = 1;
772                 c->icache.waybit = 0;   /* doesn't matter */
773
774                 dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
775                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
776                 c->dcache.ways = 1;
777                 c->dcache.waybit = 0;   /* does not matter */
778
779                 c->options |= MIPS_CPU_CACHE_CDEX_P;
780                 break;
781
782         case CPU_RM7000:
783                 rm7k_erratum31();
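                /* fall through */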
784
785         case CPU_RM9000:
786                 icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
787                 c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
788                 c->icache.ways = 4;
789                 c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
790
791                 dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
792                 c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
793                 c->dcache.ways = 4;
794                 c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
795
796 #if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
797                 c->options |= MIPS_CPU_CACHE_CDEX_P;
798 #endif
799                 c->options |= MIPS_CPU_PREFETCH;
800                 break;
801
802         default:
803                 if (!(config & MIPS_CONF_M))
804                         panic("Don't know how to probe P-caches on this cpu.");
805
806 #if defined(CONFIG_MIPS_BRCM)
807                 printk("brcm mips: enabling icache and dcache...\n");
808                 /* Enable caches */
809                 write_c0_diag(read_c0_diag() | 0xC0000000);
810 #endif
811                 /*
812                  * So we seem to be a MIPS32 or MIPS64 CPU.
813                  * Let's probe the I-cache ...
814                  */
815                 config1 = read_c0_config1();
816
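                /*
                 * Config1 fields: IS (bits 24:22) gives sets per way as
                 * 64 << IS, IL (21:19) the line size as 2 << IL (0 means
                 * no I-cache), IA (18:16) the associativity minus one;
                 * DS/DL/DA in bits 15:7 describe the D-cache the same way.
                 */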
817                 if ((lsize = ((config1 >> 19) & 7)))
818                         c->icache.linesz = 2 << lsize;
819                 else
820                         c->icache.linesz = lsize;
821                 c->icache.sets = 64 << ((config1 >> 22) & 7);
822                 c->icache.ways = 1 + ((config1 >> 16) & 7);
823
824                 icache_size = c->icache.sets *
825                               c->icache.ways *
826                               c->icache.linesz;
827                 c->icache.waybit = ffs(icache_size/c->icache.ways) - 1;
828
829                 if (config & 0x8)               /* VI bit */
830                         c->icache.flags |= MIPS_CACHE_VTAG;
831
832                 /*
833                  * Now probe the MIPS32 / MIPS64 data cache.
834                  */
835                 c->dcache.flags = 0;
836
837                 if ((lsize = ((config1 >> 10) & 7)))
838                         c->dcache.linesz = 2 << lsize;
839                 else
840                         c->dcache.linesz= lsize;
841                 c->dcache.sets = 64 << ((config1 >> 13) & 7);
842                 c->dcache.ways = 1 + ((config1 >> 7) & 7);
843
844                 dcache_size = c->dcache.sets *
845                               c->dcache.ways *
846                               c->dcache.linesz;
847                 c->dcache.waybit = ffs(dcache_size/c->dcache.ways) - 1;
848
849                 c->options |= MIPS_CPU_PREFETCH;
850                 break;
851         }
852
853         /*
854          * Processor configuration sanity check for the R4000SC erratum
855          * #5.  With page sizes larger than 32kB there is no possibility
856          * to get a VCE exception anymore so we don't care about this
857          * misconfiguration.  The case is rather theoretical anyway;
858          * presumably no vendor is shipping his hardware in the "bad"
859          * configuration.
860          */
861         if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
862             !(config & CONF_SC) && c->icache.linesz != 16 &&
863             PAGE_SIZE <= 0x8000)
864                 panic("Improper R4000SC processor configuration detected");
865
866         /* compute a couple of other cache variables */
867         c->icache.waysize = icache_size / c->icache.ways;
868         c->dcache.waysize = dcache_size / c->dcache.ways;
869
870         c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
871         c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
872
873         /*
874          * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
875          * 2-way virtually indexed, so normally they'd suffer from aliases,
876          * but magic in the hardware deals with that for us so we don't need
877          * to take care ourselves.
878          */
879         if (c->cputype != CPU_R10000 && c->cputype != CPU_R12000)
880                 if (c->dcache.waysize > PAGE_SIZE)
881                         c->dcache.flags |= MIPS_CACHE_ALIASES;
882
883         switch (c->cputype) {
884         case CPU_20KC:
885                 /*
886                  * Some older 20Kc chips don't have the 'VI' bit in
887                  * the config register.
888                  */
889                 c->icache.flags |= MIPS_CACHE_VTAG;
890                 break;
891
892         case CPU_AU1500:
893                 c->icache.flags |= MIPS_CACHE_IC_F_DC;
894                 break;
895         }
896
897         printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
898                icache_size >> 10,
899                cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
900                way_string[c->icache.ways], c->icache.linesz);
901
902         printk("Primary data cache %ldkB %s, linesize %d bytes.\n",
903                dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
904 }
905
906 /*
907  * If you even _breathe_ on this function, look at the gcc output and make sure
908  * it does not pop things on and off the stack for the cache sizing loop that
909  * executes in KSEG1 space or else you will crash and burn badly.  You have
910  * been warned.
911  */
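/*
 * Probing strategy: prime the caches with valid tags at power-of-two
 * strides, plant a zero (invalid) tag at the base index, then read tags
 * back at growing power-of-two offsets.  The first offset whose tag reads
 * back as zero has wrapped around to index 0, and that offset is taken as
 * the S-cache size.
 */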
912 static int __init probe_scache(void)
913 {
914         extern unsigned long stext;
915         unsigned long flags, addr, begin, end, pow2;
916         unsigned int config = read_c0_config();
917         struct cpuinfo_mips *c = &current_cpu_data;
918         int tmp;
919
920         if (config & CONF_SC)
921                 return 0;
922
923         begin = (unsigned long) &stext;
924         begin &= ~((4 * 1024 * 1024) - 1);
925         end = begin + (4 * 1024 * 1024);
926
927         /*
928          * This is such a bitch, you'd think they would make it easy to do
929          * this.  Away you daemons of stupidity!
930          */
931         local_irq_save(flags);
932
933         /* Fill each size-multiple cache line with a valid tag. */
934         pow2 = (64 * 1024);
935         for (addr = begin; addr < end; addr = (begin + pow2)) {
936                 unsigned long *p = (unsigned long *) addr;
937                 __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
938                 pow2 <<= 1;
939         }
940
941         /* Load first line with zero (therefore invalid) tag. */
942         write_c0_taglo(0);
943         write_c0_taghi(0);
944         __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
945         cache_op(Index_Store_Tag_I, begin);
946         cache_op(Index_Store_Tag_D, begin);
947         cache_op(Index_Store_Tag_SD, begin);
948
949         /* Now search for the wrap around point. */
950         pow2 = (128 * 1024);
951         tmp = 0;
952         for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
953                 cache_op(Index_Load_Tag_SD, addr);
954                 __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
955                 if (!read_c0_taglo())
956                         break;
957                 pow2 <<= 1;
958         }
959         local_irq_restore(flags);
960         addr -= begin;
961
962         scache_size = addr;
963         c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
964         c->scache.ways = 1;
965         c->scache.waybit = 0;           /* does not matter */
966
967         return 1;
968 }
969
970 typedef int (*probe_func_t)(unsigned long);
971 extern int r5k_sc_init(void);
972 extern int rm7k_sc_init(void);
973
974 static void __init setup_scache(void)
975 {
976         struct cpuinfo_mips *c = &current_cpu_data;
977         unsigned int config = read_c0_config();
978         probe_func_t probe_scache_kseg1;
979         int sc_present = 0;
980
981         /*
982          * Do the probing thing on R4000SC and R4400SC processors.  Other
983          * processors don't have an S-cache that would be relevant to the
984          * Linux memory management.
985          */
986         switch (c->cputype) {
987         case CPU_R4000SC:
988         case CPU_R4000MC:
989         case CPU_R4400SC:
990         case CPU_R4400MC:
991                 probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
992                 sc_present = probe_scache_kseg1(config);
993                 if (sc_present)
994                         c->options |= MIPS_CPU_CACHE_CDEX_S;
995                 break;
996
997         case CPU_R10000:
998         case CPU_R12000:
999                 scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
1000                 c->scache.linesz = 64 << ((config >> 13) & 1);
1001                 c->scache.ways = 2;
1002                 c->scache.waybit= 0;
1003                 sc_present = 1;
1004                 break;
1005
1006         case CPU_R5000:
1007         case CPU_NEVADA:
1008 #ifdef CONFIG_R5000_CPU_SCACHE
1009                 r5k_sc_init();
1010 #endif
1011                 return;
1012
1013         case CPU_RM7000:
1014         case CPU_RM9000:
1015 #ifdef CONFIG_RM7000_CPU_SCACHE
1016                 rm7k_sc_init();
1017 #endif
1018                 return;
1019
1020         default:
1021                 sc_present = 0;
1022         }
1023
1024         if (!sc_present)
1025                 return;
1026
1027         if ((c->isa_level == MIPS_CPU_ISA_M32 ||
1028              c->isa_level == MIPS_CPU_ISA_M64) &&
1029             !(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
1030                 panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
1031
1032         /* compute a couple of other cache variables */
1033         c->scache.waysize = scache_size / c->scache.ways;
1034
1035         c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
1036
1037         printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1038                scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1039
1040         c->options |= MIPS_CPU_SUBSET_CACHES;
1041 }
1042
1043 static inline void coherency_setup(void)
1044 {
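        /*
         * Set the Config.K0 cacheability and coherency attribute for
         * KSEG0 to the platform default.
         */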
1045         change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
1046
1047         /*
1048          * c0_config.cu=0 specifies that updates by the sc instruction use
1049          * the coherency mode specified by the TLB; 1 means cacheable
1050          * coherent update on write will be used.  Not all processors have
1051          * this bit, and some wire it to zero, others like Toshiba had the
1052          * silly idea of putting something else there ...
1053          */
1054         switch (current_cpu_data.cputype) {
1055         case CPU_R4000PC:
1056         case CPU_R4000SC:
1057         case CPU_R4000MC:
1058         case CPU_R4400PC:
1059         case CPU_R4400SC:
1060         case CPU_R4400MC:
1061                 clear_c0_config(CONF_CU);
1062                 break;
1063         }
1064 }
1065
1066 void __init ld_mmu_r4xx0(void)
1067 {
1068         extern void build_clear_page(void);
1069         extern void build_copy_page(void);
1070         extern char except_vec2_generic;
1071         struct cpuinfo_mips *c = &current_cpu_data;
1072
1073         /* Default cache error handler for R4000 and R5000 family */
1074         memcpy((void *)(CAC_BASE   + 0x100), &except_vec2_generic, 0x80);
1075         memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_generic, 0x80);
1076
1077         probe_pcache();
1078         setup_scache();
1079
1080         if (c->dcache.sets * c->dcache.ways > PAGE_SIZE)
1081                 c->dcache.flags |= MIPS_CACHE_ALIASES;
1082
1083         r4k_blast_dcache_page_setup();
1084         r4k_blast_dcache_page_indexed_setup();
1085         r4k_blast_dcache_setup();
1086         r4k_blast_icache_page_setup();
1087         r4k_blast_icache_page_indexed_setup();
1088         r4k_blast_icache_setup();
1089         r4k_blast_scache_page_setup();
1090         r4k_blast_scache_setup();
1091
1092         /*
1093          * Some MIPS32 and MIPS64 processors have physically indexed caches.
1094          * This code supports virtually indexed processors and will be
1095          * unnecessarily inefficient on physically indexed processors.
1096          */
1097         shm_align_mask = max_t(unsigned long,
1098                                 c->dcache.sets * c->dcache.linesz - 1,
1099                                 PAGE_SIZE - 1);
1100
1101         flush_cache_all         = r4k_flush_cache_all;
1102         __flush_cache_all       = r4k___flush_cache_all;
1103         flush_cache_mm          = r4k_flush_cache_mm;
1104         flush_cache_page        = r4k_flush_cache_page;
1105         flush_icache_page       = r4k_flush_icache_page;
1106         flush_cache_range       = r4k_flush_cache_range;
1107
1108         flush_cache_sigtramp    = r4k_flush_cache_sigtramp;
1109         flush_icache_all        = r4k_flush_icache_all;
1110         flush_data_cache_page   = r4k_flush_data_cache_page;
1111         flush_icache_range      = r4k_flush_icache_range;
1112
1113 #ifdef CONFIG_DMA_NONCOHERENT
1114         _dma_cache_wback_inv    = r4k_dma_cache_wback_inv;
1115         _dma_cache_wback        = r4k_dma_cache_wback_inv;
1116         _dma_cache_inv          = r4k_dma_cache_inv;
1117 #endif
1118
1119         __flush_cache_all();
1120         coherency_setup();
1121
1122         build_clear_page();
1123         build_copy_page();
1124 }