/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 *
 * FIXME: Handle split L2 caches.
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/r4kcacheops.h>

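/*
 * Note on the two flavours below: "indexed" CACHE ops select a cache line
 * purely by the index bits of the address, regardless of what the line
 * currently holds; "hit" ops only act when the supplied address actually
 * hits in the cache.  The blast_* routines further down also rely on
 * dcache_size, icache_size and scache_size having been set up by the
 * boot-time cache probe (they are declared elsewhere, not in this header).
 */
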
static inline void flush_icache_line_indexed(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1,(%0)\n\t"
		".set reorder"
		: /* no outputs */
		: "r" (addr), "i" (Index_Invalidate_I));
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1,(%0)\n\t"
		".set reorder"
		: /* no outputs */
		: "r" (addr), "i" (Index_Writeback_Inv_D));
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1,(%0)\n\t"
		".set reorder"
		: /* no outputs */
		: "r" (addr), "i" (Index_Writeback_Inv_SD));
}

static inline void flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1,(%0)\n\t"
		".set reorder"
		: /* no outputs */
		: "r" (addr), "i" (Hit_Invalidate_I));
}

static inline void flush_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1,(%0)\n\t"
		".set reorder"
		: /* no outputs */
		: "r" (addr), "i" (Hit_Writeback_Inv_D));
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1,(%0)\n\t"
		".set reorder"
		: /* no outputs */
		: "r" (addr), "i" (Hit_Invalidate_D));
}

static inline void invalidate_scache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1,(%0)\n\t"
		".set reorder"
		: /* no outputs */
		: "r" (addr), "i" (Hit_Invalidate_SD));
}

static inline void flush_scache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"cache %1,(%0)\n\t"
		".set reorder"
		: /* no outputs */
		: "r" (addr), "i" (Hit_Writeback_Inv_SD));
}

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"1:\tcache %1,(%0)\n"
		"2:\t.set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		STR(PTR)"\t1b,2b\n\t"
		".previous"
		: /* no outputs */
		: "r" (addr), "i" (Hit_Invalidate_I));
}

static inline void protected_writeback_dcache_line(unsigned long addr)
{
	__asm__ __volatile__(
		".set noreorder\n\t"
		"1:\tcache %1,(%0)\n"
		"2:\t.set reorder\n\t"
		".section\t__ex_table,\"a\"\n\t"
		STR(PTR)"\t1b,2b\n\t"
		".previous"
		: /* no outputs */
		: "r" (addr), "i" (Hit_Writeback_D));
}

#define cache16_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x010(%0);	\
		cache %1, 0x020(%0); cache %1, 0x030(%0);	\
		cache %1, 0x040(%0); cache %1, 0x050(%0);	\
		cache %1, 0x060(%0); cache %1, 0x070(%0);	\
		cache %1, 0x080(%0); cache %1, 0x090(%0);	\
		cache %1, 0x0a0(%0); cache %1, 0x0b0(%0);	\
		cache %1, 0x0c0(%0); cache %1, 0x0d0(%0);	\
		cache %1, 0x0e0(%0); cache %1, 0x0f0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x110(%0);	\
		cache %1, 0x120(%0); cache %1, 0x130(%0);	\
		cache %1, 0x140(%0); cache %1, 0x150(%0);	\
		cache %1, 0x160(%0); cache %1, 0x170(%0);	\
		cache %1, 0x180(%0); cache %1, 0x190(%0);	\
		cache %1, 0x1a0(%0); cache %1, 0x1b0(%0);	\
		cache %1, 0x1c0(%0); cache %1, 0x1d0(%0);	\
		cache %1, 0x1e0(%0); cache %1, 0x1f0(%0);	\
		.set reorder"					\
		: /* no outputs */				\
		: "r" (base), "i" (op));

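/*
 * Each *_unroll32 macro below issues 32 CACHE ops, one per cache line, so
 * a single expansion covers 32 * line_size bytes: 0x200 for 16-byte lines,
 * 0x400 for 32-byte, 0x800 for 64-byte and 0x1000 for 128-byte lines.
 * That is why the blast_* loops advance by exactly those strides.
 */
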
static inline void blast_dcache16(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + dcache_size);

	while(start < end) {
		cache16_unroll32(start,Index_Writeback_Inv_D);
		start += 0x200;
	}
}

static inline void blast_dcache16_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while(start < end) {
		cache16_unroll32(start,Hit_Writeback_Inv_D);
		start += 0x200;
	}
}

static inline void blast_dcache16_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while(start < end) {
		cache16_unroll32(start,Index_Writeback_Inv_D);
		start += 0x200;
	}
}

static inline void blast_icache16(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + icache_size);

	while(start < end) {
		cache16_unroll32(start,Index_Invalidate_I);
		start += 0x200;
	}
}

static inline void blast_icache16_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while(start < end) {
		cache16_unroll32(start,Hit_Invalidate_I);
		start += 0x200;
	}
}

static inline void blast_icache16_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while(start < end) {
		cache16_unroll32(start,Index_Invalidate_I);
		start += 0x200;
	}
}

static inline void blast_scache16(void)
{
	unsigned long start = KSEG0;
	unsigned long end = KSEG0 + scache_size;

	while(start < end) {
		cache16_unroll32(start,Index_Writeback_Inv_SD);
		start += 0x200;
	}
}

static inline void blast_scache16_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while(start < end) {
		cache16_unroll32(start,Hit_Writeback_Inv_SD);
		start += 0x200;
	}
}

static inline void blast_scache16_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while(start < end) {
		cache16_unroll32(start,Index_Writeback_Inv_SD);
		start += 0x200;
	}
}

#define cache32_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x020(%0);	\
		cache %1, 0x040(%0); cache %1, 0x060(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0a0(%0);	\
		cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x120(%0);	\
		cache %1, 0x140(%0); cache %1, 0x160(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1a0(%0);	\
		cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);	\
		cache %1, 0x200(%0); cache %1, 0x220(%0);	\
		cache %1, 0x240(%0); cache %1, 0x260(%0);	\
		cache %1, 0x280(%0); cache %1, 0x2a0(%0);	\
		cache %1, 0x2c0(%0); cache %1, 0x2e0(%0);	\
		cache %1, 0x300(%0); cache %1, 0x320(%0);	\
		cache %1, 0x340(%0); cache %1, 0x360(%0);	\
		cache %1, 0x380(%0); cache %1, 0x3a0(%0);	\
		cache %1, 0x3c0(%0); cache %1, 0x3e0(%0);	\
		.set reorder"					\
		: /* no outputs */				\
		: "r" (base), "i" (op));

static inline void blast_dcache32(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + dcache_size);

	while(start < end) {
		cache32_unroll32(start,Index_Writeback_Inv_D);
		start += 0x400;
	}
}

/*
 * Call this function only with interrupts disabled or R4600 V2.0 may blow
 * up on you.
 *
 * R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Inv_D,
 * Hit_Writeback_D, Hit_Invalidate_D and Create_Dirty_Excl_D will only
 * operate correctly if the internal data cache refill buffer is empty.  These
 * CACHE instructions should be separated from any potential data cache miss
 * by a load instruction to an uncached address to empty the response buffer."
 * (Revision 2.0 device errata from IDT available on http://www.idt.com/
 * in .pdf format.)
 */
static inline void blast_dcache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	/*
	 * Sigh ... workaround for R4600 v1.7 bug.  Explanation see above.
	 */
	*(volatile unsigned long *)KSEG1;

	__asm__ __volatile__("nop;nop;nop;nop");
	while(start < end) {
		cache32_unroll32(start,Hit_Writeback_Inv_D);
		start += 0x400;
	}
}

static inline void blast_dcache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while(start < end) {
		cache32_unroll32(start,Index_Writeback_Inv_D);
		start += 0x400;
	}
}

static inline void blast_icache32(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + icache_size);

	while(start < end) {
		cache32_unroll32(start,Index_Invalidate_I);
		start += 0x400;
	}
}

static inline void blast_icache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while(start < end) {
		cache32_unroll32(start,Hit_Invalidate_I);
		start += 0x400;
	}
}

static inline void blast_icache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = (start + PAGE_SIZE);

	while(start < end) {
		cache32_unroll32(start,Index_Invalidate_I);
		start += 0x400;
	}
}

static inline void blast_scache32(void)
{
	unsigned long start = KSEG0;
	unsigned long end = KSEG0 + scache_size;

	while(start < end) {
		cache32_unroll32(start,Index_Writeback_Inv_SD);
		start += 0x400;
	}
}

static inline void blast_scache32_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while(start < end) {
		cache32_unroll32(start,Hit_Writeback_Inv_SD);
		start += 0x400;
	}
}

static inline void blast_scache32_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while(start < end) {
		cache32_unroll32(start,Index_Writeback_Inv_SD);
		start += 0x400;
	}
}

#define cache64_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x040(%0);	\
		cache %1, 0x080(%0); cache %1, 0x0c0(%0);	\
		cache %1, 0x100(%0); cache %1, 0x140(%0);	\
		cache %1, 0x180(%0); cache %1, 0x1c0(%0);	\
		cache %1, 0x200(%0); cache %1, 0x240(%0);	\
		cache %1, 0x280(%0); cache %1, 0x2c0(%0);	\
		cache %1, 0x300(%0); cache %1, 0x340(%0);	\
		cache %1, 0x380(%0); cache %1, 0x3c0(%0);	\
		cache %1, 0x400(%0); cache %1, 0x440(%0);	\
		cache %1, 0x480(%0); cache %1, 0x4c0(%0);	\
		cache %1, 0x500(%0); cache %1, 0x540(%0);	\
		cache %1, 0x580(%0); cache %1, 0x5c0(%0);	\
		cache %1, 0x600(%0); cache %1, 0x640(%0);	\
		cache %1, 0x680(%0); cache %1, 0x6c0(%0);	\
		cache %1, 0x700(%0); cache %1, 0x740(%0);	\
		cache %1, 0x780(%0); cache %1, 0x7c0(%0);	\
		.set reorder"					\
		: /* no outputs */				\
		: "r" (base), "i" (op));

static inline void blast_scache64(void)
{
	unsigned long start = KSEG0;
	unsigned long end = KSEG0 + scache_size;

	while(start < end) {
		cache64_unroll32(start,Index_Writeback_Inv_SD);
		start += 0x800;
	}
}

static inline void blast_scache64_page(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while(start < end) {
		cache64_unroll32(start,Hit_Writeback_Inv_SD);
		start += 0x800;
	}
}

static inline void blast_scache64_page_indexed(unsigned long page)
{
	unsigned long start = page;
	unsigned long end = page + PAGE_SIZE;

	while(start < end) {
		cache64_unroll32(start,Index_Writeback_Inv_SD);
		start += 0x800;
	}
}

#define cache128_unroll32(base,op)				\
	__asm__ __volatile__("					\
		.set noreorder;					\
		cache %1, 0x000(%0); cache %1, 0x080(%0);	\
		cache %1, 0x100(%0); cache %1, 0x180(%0);	\
		cache %1, 0x200(%0); cache %1, 0x280(%0);	\
		cache %1, 0x300(%0); cache %1, 0x380(%0);	\
		cache %1, 0x400(%0); cache %1, 0x480(%0);	\
		cache %1, 0x500(%0); cache %1, 0x580(%0);	\
		cache %1, 0x600(%0); cache %1, 0x680(%0);	\
		cache %1, 0x700(%0); cache %1, 0x780(%0);	\
		cache %1, 0x800(%0); cache %1, 0x880(%0);	\
		cache %1, 0x900(%0); cache %1, 0x980(%0);	\
		cache %1, 0xa00(%0); cache %1, 0xa80(%0);	\
		cache %1, 0xb00(%0); cache %1, 0xb80(%0);	\
		cache %1, 0xc00(%0); cache %1, 0xc80(%0);	\
		cache %1, 0xd00(%0); cache %1, 0xd80(%0);	\
		cache %1, 0xe00(%0); cache %1, 0xe80(%0);	\
		cache %1, 0xf00(%0); cache %1, 0xf80(%0);	\
		.set reorder"					\
		: /* no outputs */				\
		: "r" (base), "i" (op));

static inline void blast_scache128(void)
{
	unsigned long start = KSEG0;
	unsigned long end = KSEG0 + scache_size;

	while(start < end) {
		cache128_unroll32(start,Index_Writeback_Inv_SD);
		start += 0x1000;
	}
}

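/*
 * One cache128_unroll32 expansion covers 32 * 128 = 0x1000 bytes, i.e. a
 * whole page on 4K-page configurations, which is why the per-page
 * variants below need no loop.  (This assumes PAGE_SIZE is 4K.)
 */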
static inline void blast_scache128_page(unsigned long page)
{
	cache128_unroll32(page,Hit_Writeback_Inv_SD);
}

static inline void blast_scache128_page_indexed(unsigned long page)
{
	cache128_unroll32(page,Index_Writeback_Inv_SD);
}

#endif /* _ASM_R4KCACHE_H */