/*
 * r4kcache.h: Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 *
 * FIXME: Handle split L2 caches.
 */
8 #ifndef _MIPS_R4KCACHE_H
9 #define _MIPS_R4KCACHE_H
#include <asm/asm.h>
#include <asm/cacheops.h>
14 static inline void flush_icache_line_indexed(unsigned long addr)
24 "i" (Index_Invalidate_I));
27 static inline void flush_dcache_line_indexed(unsigned long addr)
37 "i" (Index_Writeback_Inv_D));
40 static inline void flush_scache_line_indexed(unsigned long addr)
50 "i" (Index_Writeback_Inv_SD));
53 static inline void flush_icache_line(unsigned long addr)
63 "i" (Hit_Invalidate_I));
66 static inline void flush_dcache_line(unsigned long addr)
76 "i" (Hit_Writeback_Inv_D));
79 static inline void invalidate_dcache_line(unsigned long addr)
89 "i" (Hit_Invalidate_D));
92 static inline void invalidate_scache_line(unsigned long addr)
102 "i" (Hit_Invalidate_SD));
105 static inline void flush_scache_line(unsigned long addr)
107 __asm__ __volatile__(
115 "i" (Hit_Writeback_Inv_SD));
/*
 * The next two are for badland addresses like signal trampolines.
 */
121 static inline void protected_flush_icache_line(unsigned long addr)
123 __asm__ __volatile__(
126 "1:\tcache %1,(%0)\n"
129 ".section\t__ex_table,\"a\"\n\t"
130 STR(PTR)"\t1b,2b\n\t"
134 "i" (Hit_Invalidate_I));
137 static inline void protected_writeback_dcache_line(unsigned long addr)
139 __asm__ __volatile__(
142 "1:\tcache %1,(%0)\n"
145 ".section\t__ex_table,\"a\"\n\t"
146 STR(PTR)"\t1b,2b\n\t"
150 "i" (Hit_Writeback_D));
/*
 * Issue 32 CACHE ops of type `op' covering 0x200 bytes (32 lines of
 * 16 bytes) starting at `base'.
 */
#define cache16_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		.set mips3;					\
		cache %1, 0x000(%0); cache %1, 0x010(%0);	\
		cache %1, 0x020(%0); cache %1, 0x030(%0);	\
		cache %1, 0x040(%0); cache %1, 0x050(%0);	\
		cache %1, 0x060(%0); cache %1, 0x070(%0);	\
		cache %1, 0x080(%0); cache %1, 0x090(%0);	\
		cache %1, 0x0a0(%0); cache %1, 0x0b0(%0);	\
		cache %1, 0x0c0(%0); cache %1, 0x0d0(%0);	\
		cache %1, 0x0e0(%0); cache %1, 0x0f0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x110(%0);	\
		cache %1, 0x120(%0); cache %1, 0x130(%0);	\
		cache %1, 0x140(%0); cache %1, 0x150(%0);	\
		cache %1, 0x160(%0); cache %1, 0x170(%0);	\
		cache %1, 0x180(%0); cache %1, 0x190(%0);	\
		cache %1, 0x1a0(%0); cache %1, 0x1b0(%0);	\
		cache %1, 0x1c0(%0); cache %1, 0x1d0(%0);	\
		cache %1, 0x1e0(%0); cache %1, 0x1f0(%0);	\
		.set mips0;					\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));
179 static inline void blast_dcache16(void)
181 unsigned long start = KSEG0;
182 unsigned long end = (start + dcache_size);
185 cache16_unroll32(start,Index_Writeback_Inv_D);
190 static inline void blast_dcache16_wayLSB(void)
192 unsigned long start = KSEG0;
193 unsigned long end = (start + mips_cpu.dcache.sets * mips_cpu.dcache.linesz);
197 /* LSB of VA select the way */
198 for (way = 0; way < mips_cpu.dcache.ways; way++)
199 cache16_unroll32(start|way,Index_Writeback_Inv_D);
204 static inline void blast_dcache16_page(unsigned long page)
206 unsigned long start = page;
207 unsigned long end = (start + PAGE_SIZE);
210 cache16_unroll32(start,Hit_Writeback_Inv_D);
215 static inline void blast_dcache16_page_indexed(unsigned long page)
217 unsigned long start = page;
218 unsigned long end = (start + PAGE_SIZE);
221 cache16_unroll32(start,Index_Writeback_Inv_D);
226 static inline void blast_dcache16_page_indexed_wayLSB(unsigned long page)
228 unsigned long start = page;
229 unsigned long end = (start + PAGE_SIZE);
233 /* LSB of VA select the way */
234 for (way = 0; way < mips_cpu.dcache.ways; way++)
235 cache16_unroll32(start|way,Index_Writeback_Inv_D);
240 static inline void blast_icache16(void)
242 unsigned long start = KSEG0;
243 unsigned long end = (start + icache_size);
246 cache16_unroll32(start,Index_Invalidate_I);
251 static inline void blast_icache16_wayLSB(void)
253 unsigned long start = KSEG0;
254 unsigned long end = (start + mips_cpu.icache.sets * mips_cpu.icache.linesz);
258 /* LSB of VA select the way */
259 for (way = 0; way < mips_cpu.icache.ways; way++)
260 cache16_unroll32(start|way,Index_Invalidate_I);
265 static inline void blast_icache16_page(unsigned long page)
267 unsigned long start = page;
268 unsigned long end = (start + PAGE_SIZE);
271 cache16_unroll32(start,Hit_Invalidate_I);
276 static inline void blast_icache16_page_indexed(unsigned long page)
278 unsigned long start = page;
279 unsigned long end = (start + PAGE_SIZE);
282 cache16_unroll32(start,Index_Invalidate_I);
287 static inline void blast_scache16(void)
289 unsigned long start = KSEG0;
290 unsigned long end = KSEG0 + scache_size;
293 cache16_unroll32(start,Index_Writeback_Inv_SD);
298 static inline void blast_scache16_page(unsigned long page)
300 unsigned long start = page;
301 unsigned long end = page + PAGE_SIZE;
304 cache16_unroll32(start,Hit_Writeback_Inv_SD);
309 static inline void blast_scache16_page_indexed(unsigned long page)
311 unsigned long start = page;
312 unsigned long end = page + PAGE_SIZE;
315 cache16_unroll32(start,Index_Writeback_Inv_SD);
/*
 * Issue 32 CACHE ops of type `op' covering 0x400 bytes (32 lines of
 * 32 bytes) starting at `base'.
 */
#define cache32_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		.set mips3;					\
		cache %1, 0x000(%0); cache %1, 0x020(%0);	\
		cache %1, 0x040(%0); cache %1, 0x060(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0a0(%0);	\
		cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x120(%0);	\
		cache %1, 0x140(%0); cache %1, 0x160(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1a0(%0);	\
		cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);	\
		cache %1, 0x200(%0); cache %1, 0x220(%0);	\
		cache %1, 0x240(%0); cache %1, 0x260(%0);	\
		cache %1, 0x280(%0); cache %1, 0x2a0(%0);	\
		cache %1, 0x2c0(%0); cache %1, 0x2e0(%0);	\
		cache %1, 0x300(%0); cache %1, 0x320(%0);	\
		cache %1, 0x340(%0); cache %1, 0x360(%0);	\
		cache %1, 0x380(%0); cache %1, 0x3a0(%0);	\
		cache %1, 0x3c0(%0); cache %1, 0x3e0(%0);	\
		.set mips0;					\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));
346 static inline void blast_dcache32(void)
348 unsigned long start = KSEG0;
349 unsigned long end = (start + dcache_size);
352 cache32_unroll32(start,Index_Writeback_Inv_D);
357 static inline void blast_dcache32_wayLSB(void)
359 unsigned long start = KSEG0;
360 unsigned long end = (start + mips_cpu.dcache.sets * mips_cpu.dcache.linesz);
364 /* LSB of VA select the way */
365 for (way = 0; way < mips_cpu.dcache.ways; way++)
366 cache32_unroll32(start|way,Index_Writeback_Inv_D);
/*
 * Call this function only with interrupts disabled or R4600 V2.0 may blow
 * up on you.
 *
 * R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Inv_D,
 * Hit_Writeback_D, Hit_Invalidate_D and Create_Dirty_Excl_D will only
 * operate correctly if the internal data cache refill buffer is empty. These
 * CACHE instructions should be separated from any potential data cache miss
 * by a load instruction to an uncached address to empty the response buffer."
 * (Revision 2.0 device errata from IDT available on http://www.idt.com/
 * in .pdf format.)
 */
383 static inline void blast_dcache32_page(unsigned long page)
385 unsigned long start = page;
386 unsigned long end = (start + PAGE_SIZE);
389 * Sigh ... workaround for R4600 v1.7 bug. Explanation see above.
391 *(volatile unsigned long *)KSEG1;
393 __asm__ __volatile__("nop;nop;nop;nop");
395 cache32_unroll32(start,Hit_Writeback_Inv_D);
400 static inline void blast_dcache32_page_indexed(unsigned long page)
402 unsigned long start = page;
403 unsigned long end = (start + PAGE_SIZE);
406 cache32_unroll32(start,Index_Writeback_Inv_D);
411 static inline void blast_dcache32_page_indexed_wayLSB(unsigned long page)
413 unsigned long start = page;
414 unsigned long end = (start + PAGE_SIZE);
418 /* LSB of VA select the way */
419 for (way = 0; way < mips_cpu.dcache.ways; way++)
420 cache32_unroll32(start|way,Index_Writeback_Inv_D);
425 static inline void blast_icache32(void)
427 unsigned long start = KSEG0;
428 unsigned long end = (start + icache_size);
431 cache32_unroll32(start,Index_Invalidate_I);
436 static inline void blast_icache32_wayLSB(void)
438 unsigned long start = KSEG0;
439 unsigned long end = (start + mips_cpu.icache.sets * mips_cpu.icache.linesz);
443 /* LSB of VA select the way */
444 for (way = 0; way < mips_cpu.icache.ways; way++)
445 cache32_unroll32(start|way,Index_Invalidate_I);
450 static inline void blast_icache32_page(unsigned long page)
452 unsigned long start = page;
453 unsigned long end = (start + PAGE_SIZE);
456 cache32_unroll32(start,Hit_Invalidate_I);
461 static inline void blast_icache32_page_indexed(unsigned long page)
463 unsigned long start = page;
464 unsigned long end = (start + PAGE_SIZE);
467 cache32_unroll32(start,Index_Invalidate_I);
472 static inline void blast_scache32(void)
474 unsigned long start = KSEG0;
475 unsigned long end = KSEG0 + scache_size;
478 cache32_unroll32(start,Index_Writeback_Inv_SD);
483 static inline void blast_scache32_page(unsigned long page)
485 unsigned long start = page;
486 unsigned long end = page + PAGE_SIZE;
489 cache32_unroll32(start,Hit_Writeback_Inv_SD);
494 static inline void blast_scache32_page_indexed(unsigned long page)
496 unsigned long start = page;
497 unsigned long end = page + PAGE_SIZE;
500 cache32_unroll32(start,Index_Writeback_Inv_SD);
/*
 * Issue 32 CACHE ops of type `op' covering 0x800 bytes (32 lines of
 * 64 bytes) starting at `base'.
 */
#define cache64_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		.set mips3;					\
		cache %1, 0x000(%0); cache %1, 0x040(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0c0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x140(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1c0(%0);	\
		cache %1, 0x200(%0); cache %1, 0x240(%0);	\
		cache %1, 0x280(%0); cache %1, 0x2c0(%0);	\
		cache %1, 0x300(%0); cache %1, 0x340(%0);	\
		cache %1, 0x380(%0); cache %1, 0x3c0(%0);	\
		cache %1, 0x400(%0); cache %1, 0x440(%0);	\
		cache %1, 0x480(%0); cache %1, 0x4c0(%0);	\
		cache %1, 0x500(%0); cache %1, 0x540(%0);	\
		cache %1, 0x580(%0); cache %1, 0x5c0(%0);	\
		cache %1, 0x600(%0); cache %1, 0x640(%0);	\
		cache %1, 0x680(%0); cache %1, 0x6c0(%0);	\
		cache %1, 0x700(%0); cache %1, 0x740(%0);	\
		cache %1, 0x780(%0); cache %1, 0x7c0(%0);	\
		.set mips0;					\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));
531 static inline void blast_scache64(void)
533 unsigned long start = KSEG0;
534 unsigned long end = KSEG0 + scache_size;
537 cache64_unroll32(start,Index_Writeback_Inv_SD);
542 static inline void blast_scache64_page(unsigned long page)
544 unsigned long start = page;
545 unsigned long end = page + PAGE_SIZE;
548 cache64_unroll32(start,Hit_Writeback_Inv_SD);
553 static inline void blast_scache64_page_indexed(unsigned long page)
555 unsigned long start = page;
556 unsigned long end = page + PAGE_SIZE;
559 cache64_unroll32(start,Index_Writeback_Inv_SD);
/*
 * Issue 32 CACHE ops of type `op' covering 0x1000 bytes (32 lines of
 * 128 bytes) starting at `base' -- exactly one 4K page per expansion.
 */
#define cache128_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		.set mips3;					\
		cache %1, 0x000(%0); cache %1, 0x080(%0);	\
		cache %1, 0x100(%0); cache %1, 0x180(%0);	\
		cache %1, 0x200(%0); cache %1, 0x280(%0);	\
		cache %1, 0x300(%0); cache %1, 0x380(%0);	\
		cache %1, 0x400(%0); cache %1, 0x480(%0);	\
		cache %1, 0x500(%0); cache %1, 0x580(%0);	\
		cache %1, 0x600(%0); cache %1, 0x680(%0);	\
		cache %1, 0x700(%0); cache %1, 0x780(%0);	\
		cache %1, 0x800(%0); cache %1, 0x880(%0);	\
		cache %1, 0x900(%0); cache %1, 0x980(%0);	\
		cache %1, 0xa00(%0); cache %1, 0xa80(%0);	\
		cache %1, 0xb00(%0); cache %1, 0xb80(%0);	\
		cache %1, 0xc00(%0); cache %1, 0xc80(%0);	\
		cache %1, 0xd00(%0); cache %1, 0xd80(%0);	\
		cache %1, 0xe00(%0); cache %1, 0xe80(%0);	\
		cache %1, 0xf00(%0); cache %1, 0xf80(%0);	\
		.set mips0;					\
		.set reorder"					\
		:						\
		: "r" (base),					\
		  "i" (op));
590 static inline void blast_scache128(void)
592 unsigned long start = KSEG0;
593 unsigned long end = KSEG0 + scache_size;
596 cache128_unroll32(start,Index_Writeback_Inv_SD);
601 static inline void blast_scache128_page(unsigned long page)
603 cache128_unroll32(page,Hit_Writeback_Inv_SD);
606 static inline void blast_scache128_page_indexed(unsigned long page)
608 cache128_unroll32(page,Index_Writeback_Inv_SD);
611 #endif /* !(_MIPS_R4KCACHE_H) */