/*
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 * Copyright (C) 2004 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/config.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/uaccess.h>
extern void sb1_dma_init(void);

/* These are probed at ld_mmu time */
static unsigned long icache_size;
static unsigned long dcache_size;

static unsigned short icache_line_size;
static unsigned short dcache_line_size;

static unsigned int icache_index_mask;
static unsigned int dcache_index_mask;

static unsigned short icache_assoc;
static unsigned short dcache_assoc;

static unsigned short icache_sets;
static unsigned short dcache_sets;

static unsigned int icache_range_cutoff;
static unsigned int dcache_range_cutoff;
/*
 * The dcache is fully coherent to the system, with one
 * big caveat: the instruction stream.  In other words,
 * if we miss in the icache, and have dirty data in the
 * L1 dcache, then we'll go out to memory (or the L2) and
 * get the not-as-recent data.
 *
 * So the only time we have to flush the dcache is when
 * we're flushing the icache.  Since the L2 is fully
 * coherent to everything, including I/O, we never have
 * to flush it.
 */
/*
 * Writeback and invalidate the entire dcache
 */
static inline void __sb1_writeback_inv_dcache_all(void)
{
	__asm__ __volatile__ (
	".set push                  \n"
	".set noreorder             \n"
	".set noat                  \n"
	".set mips4                 \n"
	"     move   $1, $0         \n" /* Start at index 0 */
	"1:   cache  %2, 0($1)      \n" /* Invalidate this index */
	"     cache  %2, (1<<13)($1)\n" /* Invalidate this index */
	"     cache  %2, (2<<13)($1)\n" /* Invalidate this index */
	"     cache  %2, (3<<13)($1)\n" /* Invalidate this index */
	"     addiu  %1, %1, -1     \n" /* Decrement loop count */
	"     bnez   %1, 1b         \n" /* loop test */
	"      addu  $1, $1, %0     \n" /* Next address (delay slot) */
	".set pop                   \n"
	: /* no outputs */
	: "r" (dcache_line_size), "r" (dcache_sets),
	  "i" (Index_Writeback_Inv_D));
}
/*
 * Writeback and invalidate a range of the dcache.  The addresses are
 * virtual, and since we're using index ops and bit 12 is part of both
 * the virtual frame and physical index, we have to clear both sets
 * (bit 12 set and cleared).
 */
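/*
 * Concretely: with 4KB pages, only VA bits 11:0 are guaranteed to
 * match the PA, but the 8KB way size makes bit 12 part of the cache
 * index, so a given physical line may sit at either setting of index
 * bit 12.  The loop below therefore runs each set of index ops twice,
 * with bit 12 flipped in between.
 */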
static inline void __sb1_writeback_inv_dcache_range(unsigned long start,
	unsigned long end)
{
	__asm__ __volatile__ (
	".set push                  \n"
	".set noreorder             \n"
	".set noat                  \n"
	".set mips4                 \n"
	"     and    $1, %0, %3     \n" /* mask non-index bits */
	"1:   cache  %4, (0<<13)($1)\n" /* Index-WB-inval this address */
	"     cache  %4, (1<<13)($1)\n" /* Index-WB-inval this address */
	"     cache  %4, (2<<13)($1)\n" /* Index-WB-inval this address */
	"     cache  %4, (3<<13)($1)\n" /* Index-WB-inval this address */
	"     xori   $1, $1, 1<<12  \n" /* flip bit 12 (va/pa alias) */
	"     cache  %4, (0<<13)($1)\n" /* Index-WB-inval this address */
	"     cache  %4, (1<<13)($1)\n" /* Index-WB-inval this address */
	"     cache  %4, (2<<13)($1)\n" /* Index-WB-inval this address */
	"     cache  %4, (3<<13)($1)\n" /* Index-WB-inval this address */
	"     addu   %0, %0, %2     \n" /* next line */
	"     bne    %0, %1, 1b     \n" /* loop test */
	"      and   $1, %0, %3     \n" /* mask non-index bits (delay slot) */
	".set pop                   \n"
	: /* no outputs */
	: "r" (start & ~(dcache_line_size - 1)),
	  "r" ((end + dcache_line_size - 1) & ~(dcache_line_size - 1)),
	  "r" (dcache_line_size),
	  "r" (dcache_index_mask),
	  "i" (Index_Writeback_Inv_D));
}
/*
 * Writeback and invalidate a range of the dcache.  With physical
 * addresses, we don't have to worry about possible bit 12 aliasing.
 * XXXKW is it worth turning on KX and using hit ops with xkphys?
 */
static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start,
	unsigned long end)
{
	__asm__ __volatile__ (
	".set push                  \n"
	".set noreorder             \n"
	".set noat                  \n"
	".set mips4                 \n"
	"     and    $1, %0, %3     \n" /* mask non-index bits */
	"1:   cache  %4, (0<<13)($1)\n" /* Index-WB-inval this address */
	"     cache  %4, (1<<13)($1)\n" /* Index-WB-inval this address */
	"     cache  %4, (2<<13)($1)\n" /* Index-WB-inval this address */
	"     cache  %4, (3<<13)($1)\n" /* Index-WB-inval this address */
	"     addu   %0, %0, %2     \n" /* next line */
	"     bne    %0, %1, 1b     \n" /* loop test */
	"      and   $1, %0, %3     \n" /* mask non-index bits (delay slot) */
	".set pop                   \n"
	: /* no outputs */
	: "r" (start & ~(dcache_line_size - 1)),
	  "r" ((end + dcache_line_size - 1) & ~(dcache_line_size - 1)),
	  "r" (dcache_line_size),
	  "r" (dcache_index_mask),
	  "i" (Index_Writeback_Inv_D));
}
/*
 * Invalidate the entire icache
 */
static inline void __sb1_flush_icache_all(void)
{
	__asm__ __volatile__ (
	".set push                  \n"
	".set noreorder             \n"
	".set noat                  \n"
	".set mips4                 \n"
	"     move   $1, $0         \n" /* Start at index 0 */
	"1:   cache  %2, 0($1)      \n" /* Invalidate this index */
	"     cache  %2, (1<<13)($1)\n" /* Invalidate this index */
	"     cache  %2, (2<<13)($1)\n" /* Invalidate this index */
	"     cache  %2, (3<<13)($1)\n" /* Invalidate this index */
	"     addiu  %1, %1, -1     \n" /* Decrement loop count */
	"     bnez   %1, 1b         \n" /* loop test */
	"      addu  $1, $1, %0     \n" /* Next address (delay slot) */
	"     bnezl  $0, 2f         \n" /* Force mispredict */
	"      nop                  \n"
	"2:   .set pop              \n"
	: /* no outputs */
	: "r" (icache_line_size), "r" (icache_sets),
	  "i" (Index_Invalidate_I));
}
/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	int cpu = smp_processor_id();

	if (!(vma->vm_flags & VM_EXEC))
		return;

	__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * Bumping the ASID is probably cheaper than the flush ...
	 */
	if (cpu_context(cpu, vma->vm_mm) != 0)
		drop_mmu_context(vma->vm_mm, cpu);
}
#ifdef CONFIG_SMP
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
};

static void sb1_flush_cache_page_ipi(void *info)
{
	struct flush_cache_page_args *args = info;

	local_sb1_flush_cache_page(args->vma, args->addr);
}
/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_cache_page_args args;

	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.addr = addr;
	smp_call_function(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
	local_sb1_flush_cache_page(vma, addr);
}
#else
void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
	__attribute__((alias("local_sb1_flush_cache_page")));
#endif
/*
 * Invalidate a range of the icache.  The addresses are virtual, and
 * the cache is virtually indexed and tagged.  However, we don't
 * necessarily have the right ASID context, so use index ops instead
 * of hit ops.
 */
static inline void __sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	__asm__ __volatile__ (
	".set push                  \n"
	".set noreorder             \n"
	".set noat                  \n"
	".set mips4                 \n"
	"     and    $1, %0, %3     \n" /* mask non-index bits */
	"1:   cache  %4, (0<<13)($1)\n" /* Index-inval this address */
	"     cache  %4, (1<<13)($1)\n" /* Index-inval this address */
	"     cache  %4, (2<<13)($1)\n" /* Index-inval this address */
	"     cache  %4, (3<<13)($1)\n" /* Index-inval this address */
	"     addu   %0, %0, %2     \n" /* next line */
	"     bne    %0, %1, 1b     \n" /* loop test */
	"      and   $1, %0, %3     \n" /* mask non-index bits (delay slot) */
	"     bnezl  $0, 2f         \n" /* Force mispredict */
	"      nop                  \n"
	"2:   .set pop              \n"
	: /* no outputs */
	: "r" (start & ~(icache_line_size - 1)),
	  "r" ((end + icache_line_size - 1) & ~(icache_line_size - 1)),
	  "r" (icache_line_size),
	  "r" (icache_index_mask),
	  "i" (Index_Invalidate_I));
}
/*
 * Invalidate all caches on this CPU
 */
static void local_sb1___flush_cache_all(void)
{
	__sb1_writeback_inv_dcache_all();
	__sb1_flush_icache_all();
}

#ifdef CONFIG_SMP
void sb1___flush_cache_all_ipi(void *ignored)
	__attribute__((alias("local_sb1___flush_cache_all")));

static void sb1___flush_cache_all(void)
{
	smp_call_function(sb1___flush_cache_all_ipi, 0, 1, 1);
	local_sb1___flush_cache_all();
}
#else
void sb1___flush_cache_all(void)
	__attribute__((alias("local_sb1___flush_cache_all")));
#endif
/*
 * When flushing a range in the icache, we have to first writeback
 * the dcache for the same range, so new ifetches will see any
 * data that was dirty in the dcache.
 *
 * The start/end arguments are Kseg addresses (possibly mapped Kseg).
 */
static void local_sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	/* Just wb-inv the whole dcache if the range is big enough */
	if ((end - start) > dcache_range_cutoff)
		__sb1_writeback_inv_dcache_all();
	else
		__sb1_writeback_inv_dcache_range(start, end);

	/* Just flush the whole icache if the range is big enough */
	if ((end - start) > icache_range_cutoff)
		__sb1_flush_icache_all();
	else
		__sb1_flush_icache_range(start, end);
}
#ifdef CONFIG_SMP
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static void sb1_flush_icache_range_ipi(void *info)
{
	struct flush_icache_range_args *args = info;

	local_sb1_flush_icache_range(args->start, args->end);
}

void sb1_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;
	smp_call_function(sb1_flush_icache_range_ipi, &args, 1, 1);
	local_sb1_flush_icache_range(start, end);
}
#else
void sb1_flush_icache_range(unsigned long start, unsigned long end)
	__attribute__((alias("local_sb1_flush_icache_range")));
#endif
/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	unsigned long start;
	int cpu = smp_processor_id();

	if (!(vma->vm_flags & VM_EXEC))
		return;

	/* Need to writeback any dirty data for that page, we have the PA */
	start = (unsigned long)(page - mem_map) << PAGE_SHIFT;
	__sb1_writeback_inv_dcache_phys_range(start, start + PAGE_SIZE);

	/*
	 * If there's a context, bump the ASID (cheaper than a flush,
	 * since we don't know VAs!)
	 */
	if (cpu_context(cpu, vma->vm_mm) != 0) {
		drop_mmu_context(vma->vm_mm, cpu);
	}
}
#ifdef CONFIG_SMP
struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};

static void sb1_flush_icache_page_ipi(void *info)
{
	struct flush_icache_page_args *args = info;

	local_sb1_flush_icache_page(args->vma, args->page);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;
	smp_call_function(sb1_flush_icache_page_ipi, (void *) &args, 1, 1);
	local_sb1_flush_icache_page(vma, page);
}
#else
void sb1_flush_icache_page(struct vm_area_struct *vma, struct page *page)
	__attribute__((alias("local_sb1_flush_icache_page")));
#endif
/*
 * A signal trampoline must fit into a single cacheline.
 */
static void local_sb1_flush_cache_sigtramp(unsigned long addr)
{
	__asm__ __volatile__ (
	".set push                  \n"
	".set noreorder             \n"
	".set noat                  \n"
	".set mips4                 \n"
	"     cache  %2, (0<<13)(%0)\n" /* Index-inval this address */
	"     cache  %2, (1<<13)(%0)\n" /* Index-inval this address */
	"     cache  %2, (2<<13)(%0)\n" /* Index-inval this address */
	"     cache  %2, (3<<13)(%0)\n" /* Index-inval this address */
	"     xori   $1, %0, 1<<12  \n" /* Flip index bit 12 */
	"     cache  %2, (0<<13)($1)\n" /* Index-inval this address */
	"     cache  %2, (1<<13)($1)\n" /* Index-inval this address */
	"     cache  %2, (2<<13)($1)\n" /* Index-inval this address */
	"     cache  %2, (3<<13)($1)\n" /* Index-inval this address */
	"     cache  %3, (0<<13)(%1)\n" /* Index-inval this address */
	"     cache  %3, (1<<13)(%1)\n" /* Index-inval this address */
	"     cache  %3, (2<<13)(%1)\n" /* Index-inval this address */
	"     cache  %3, (3<<13)(%1)\n" /* Index-inval this address */
	"     bnezl  $0, 1f         \n" /* Force mispredict */
	"      nop                  \n"
	"1:   .set pop              \n"
	: /* no outputs */
	: "r" (addr & dcache_index_mask), "r" (addr & icache_index_mask),
	  "i" (Index_Writeback_Inv_D), "i" (Index_Invalidate_I));
}
#ifdef CONFIG_SMP
static void sb1_flush_cache_sigtramp_ipi(void *info)
{
	unsigned long iaddr = (unsigned long) info;

	local_sb1_flush_cache_sigtramp(iaddr);
}

static void sb1_flush_cache_sigtramp(unsigned long addr)
{
	local_sb1_flush_cache_sigtramp(addr);
	smp_call_function(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
}
#else
void sb1_flush_cache_sigtramp(unsigned long addr)
	__attribute__((alias("local_sb1_flush_cache_sigtramp")));
#endif
/*
 * Anything that just flushes dcache state can be ignored, as we're always
 * coherent in dcache space.  This is just a dummy function that all the
 * nop'ed routines point to
 */
static void sb1_nop(void)
{
}
/*
 * Cache set values (from the mips64 spec)
 * 0 - 64
 * 1 - 128
 * 2 - 256
 * 3 - 512
 * 4 - 1024
 * 5 - 2048
 * 6 - 4096
 * 7 - reserved
 */
static unsigned int decode_cache_sets(unsigned int config_field)
{
	if (config_field == 7) {
		/* JDCXXX - Find a graceful way to abort. */
		return 0;
	}
	return (1 << (config_field + 6));
}
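/* For example, a sets field of 2 decodes as 1 << (2 + 6) = 256 sets per way. */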
/*
 * Cache line size values (from the mips64 spec)
 * 0 - No cache present.
 * 1 - 4 bytes
 * 2 - 8 bytes
 * 3 - 16 bytes
 * 4 - 32 bytes
 * 5 - 64 bytes
 * 6 - 128 bytes
 * 7 - reserved
 */
static unsigned int decode_cache_line_size(unsigned int config_field)
{
	if (config_field == 0) {
		return 0;
	} else if (config_field == 7) {
		/* JDCXXX - Find a graceful way to abort. */
		return 0;
	}
	return (1 << (config_field + 1));
}
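/* For example, a line size field of 4 decodes as 1 << (4 + 1) = 32 bytes. */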
/*
 * Relevant bits of the config1 register format (from the MIPS32/MIPS64 specs)
 *
 * 24:22 Icache sets per way
 * 21:19 Icache line size
 * 18:16 Icache Associativity
 * 15:13 Dcache sets per way
 * 12:10 Dcache line size
 * 9:7   Dcache Associativity
 */
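/*
 * A worked decode, assuming the usual SB1 geometry of 32K, 4-way,
 * 32-byte-line primary caches (which also matches the 1<<13 way
 * offsets hardcoded in the flush loops above): line size field 4 ->
 * 32 bytes, sets field 2 -> 256 sets per way, associativity field 3
 * -> 4 ways; 32 * 256 * 4 = 32K, and the resulting index mask is
 * (256 - 1) * 32 = 0x1fe0.
 */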
static char *way_string[] = {
	"direct mapped", "2-way", "3-way", "4-way",
	"5-way", "6-way", "7-way", "8-way",
};
static __init void probe_cache_sizes(void)
{
	unsigned int config1;

	config1 = read_c0_config1();
	icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7);
	dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7);
	icache_sets = decode_cache_sets((config1 >> 22) & 0x7);
	dcache_sets = decode_cache_sets((config1 >> 13) & 0x7);
	icache_assoc = ((config1 >> 16) & 0x7) + 1;
	dcache_assoc = ((config1 >> 7) & 0x7) + 1;
	icache_size = icache_line_size * icache_sets * icache_assoc;
	dcache_size = dcache_line_size * dcache_sets * dcache_assoc;
	/* Need to remove non-index bits for index ops */
	icache_index_mask = (icache_sets - 1) * icache_line_size;
	dcache_index_mask = (dcache_sets - 1) * dcache_line_size;
	/*
	 * These are for choosing range (index ops) versus all.
	 * icache flushes all ways for each set, so drop icache_assoc.
	 * dcache flushes all ways and each setting of bit 12 for each
	 * index, so drop dcache_assoc and halve the dcache_sets.
	 */
	icache_range_cutoff = icache_sets * icache_line_size;
	dcache_range_cutoff = (dcache_sets / 2) * dcache_line_size;
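	/*
	 * With the geometry assumed above, that works out to an 8KB
	 * icache cutoff (256 * 32) and a 4KB dcache cutoff (128 * 32):
	 * anything larger than one pass through the index space is
	 * cheaper to flush wholesale.
	 */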
550 printk("Primary instruction cache %ldkB, %s, linesize %d bytes.\n",
551 icache_size >> 10, way_string[icache_assoc - 1],
553 printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
554 dcache_size >> 10, way_string[dcache_assoc - 1],
/*
 * This is called from loadmmu.c.  We have to set up all the
 * memory management function pointers, as well as initialize
 * the caches and tlbs
 */
void ld_mmu_sb1(void)
{
	extern char except_vec2_sb1;
	extern char handle_vec2_sb1;

	/* Special cache error handler for SB1 */
	memcpy((void *)(KSEG0 + 0x100), &except_vec2_sb1, 0x80);
	memcpy((void *)(KSEG1 + 0x100), &except_vec2_sb1, 0x80);
	memcpy((void *)KSEG1ADDR(&handle_vec2_sb1), &handle_vec2_sb1, 0x80);

	probe_cache_sizes();
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
	sb1_dma_init();
#endif
	/*
	 * None of these are needed for the SB1 - the Dcache is
	 * physically indexed and tagged, so no virtual aliasing can
	 * occur.
	 */
	_flush_cache_range = (void *) sb1_nop;
	_flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop;
	_flush_cache_all = sb1_nop;
	/* These routines are for Icache coherence with the Dcache */
	_flush_icache_range = sb1_flush_icache_range;
	_flush_icache_page = sb1_flush_icache_page;
	_flush_icache_all = __sb1_flush_icache_all; /* local only */

	/* This implies an Icache flush too, so can't be nop'ed */
	_flush_cache_page = sb1_flush_cache_page;

	_flush_cache_sigtramp = sb1_flush_cache_sigtramp;
	_flush_data_cache_page = (void *) sb1_nop;

	___flush_cache_all = sb1___flush_cache_all;
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * This is the only way to force the update of K0 to complete
	 * before subsequent instruction fetch.
	 */
	__asm__ __volatile__ (