/*
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 *
 * ########################################################################
 *
 *  This program is free software; you can distribute it and/or modify it
 *  under the terms of the GNU General Public License (Version 2) as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * ########################################################################
 *
 * Inline assembly cache operations.
 *
 * This file is the original r4cache.c file with modification that makes the
 * cache handling more generic.
 *
 * FIXME: Handle split L2 caches.
 */
32 #ifndef _MIPS_MIPS64_CACHE_H
33 #define _MIPS_MIPS64_CACHE_H
36 #include <asm/cacheops.h>
38 static inline void flush_icache_line_indexed(unsigned long addr)
40 unsigned long waystep = icache_size/mips_cpu.icache.ways;
43 for (way = 0; way < mips_cpu.icache.ways; way++)
51 "i" (Index_Invalidate_I));
57 static inline void flush_dcache_line_indexed(unsigned long addr)
59 unsigned long waystep = dcache_size/mips_cpu.dcache.ways;
62 for (way = 0; way < mips_cpu.dcache.ways; way++)
70 "i" (Index_Writeback_Inv_D));
76 static inline void flush_scache_line_indexed(unsigned long addr)
78 unsigned long waystep = scache_size/mips_cpu.scache.ways;
81 for (way = 0; way < mips_cpu.scache.ways; way++)
89 "i" (Index_Writeback_Inv_SD));
95 static inline void flush_icache_line(unsigned long addr)
103 "i" (Hit_Invalidate_I));
106 static inline void flush_dcache_line(unsigned long addr)
108 __asm__ __volatile__(
114 "i" (Hit_Writeback_Inv_D));
117 static inline void invalidate_dcache_line(unsigned long addr)
119 __asm__ __volatile__(
125 "i" (Hit_Invalidate_D));
128 static inline void invalidate_scache_line(unsigned long addr)
130 __asm__ __volatile__(
136 "i" (Hit_Invalidate_SD));
139 static inline void flush_scache_line(unsigned long addr)
141 __asm__ __volatile__(
147 "i" (Hit_Writeback_Inv_SD));
/*
 * The next two are for badland addresses like signal trampolines.
 */
153 static inline void protected_flush_icache_line(unsigned long addr)
155 __asm__ __volatile__(
157 "1:\tcache %1,(%0)\n"
158 "2:\t.set reorder\n\t"
159 ".section\t__ex_table,\"a\"\n\t"
163 : "r" (addr), "i" (Hit_Invalidate_I));
166 static inline void protected_writeback_dcache_line(unsigned long addr)
168 __asm__ __volatile__(
170 "1:\tcache %1,(%0)\n"
171 "2:\t.set reorder\n\t"
172 ".section\t__ex_table,\"a\"\n\t"
176 : "r" (addr), "i" (Hit_Writeback_D));
/*
 * Issue a single CACHE instruction: operation @op on the line selected
 * by address @base.  Building block for the blast_* loops below.
 */
#define cache_unroll(base,op)			\
	__asm__ __volatile__("			\
		.set noreorder;			\
		.set mips3;			\
		cache %1, (%0);			\
		.set mips0;			\
		.set reorder"			\
		:				\
		: "r" (base),			\
		  "i" (op));
189 static inline void blast_dcache(void)
191 unsigned long start = KSEG0;
192 unsigned long end = (start + dcache_size);
195 cache_unroll(start,Index_Writeback_Inv_D);
200 static inline void blast_dcache_page(unsigned long page)
202 unsigned long start = page;
203 unsigned long end = (start + PAGE_SIZE);
206 cache_unroll(start,Hit_Writeback_Inv_D);
211 static inline void blast_dcache_page_indexed(unsigned long page)
214 unsigned long end = (page + PAGE_SIZE);
215 unsigned long waystep = dcache_size/mips_cpu.dcache.ways;
218 for (way = 0; way < mips_cpu.dcache.ways; way++) {
219 start = page + way*waystep;
221 cache_unroll(start,Index_Writeback_Inv_D);
227 static inline void blast_icache(void)
229 unsigned long start = KSEG0;
230 unsigned long end = (start + icache_size);
233 cache_unroll(start,Index_Invalidate_I);
238 static inline void blast_icache_page(unsigned long page)
240 unsigned long start = page;
241 unsigned long end = (start + PAGE_SIZE);
244 cache_unroll(start,Hit_Invalidate_I);
249 static inline void blast_icache_page_indexed(unsigned long page)
252 unsigned long end = (page + PAGE_SIZE);
253 unsigned long waystep = icache_size/mips_cpu.icache.ways;
256 for (way = 0; way < mips_cpu.icache.ways; way++) {
257 start = page + way*waystep;
259 cache_unroll(start,Index_Invalidate_I);
265 static inline void blast_scache(void)
267 unsigned long start = KSEG0;
268 unsigned long end = KSEG0 + scache_size;
271 cache_unroll(start,Index_Writeback_Inv_SD);
276 static inline void blast_scache_page(unsigned long page)
278 unsigned long start = page;
279 unsigned long end = page + PAGE_SIZE;
282 cache_unroll(start,Hit_Writeback_Inv_SD);
287 static inline void blast_scache_page_indexed(unsigned long page)
290 unsigned long end = (page + PAGE_SIZE);
291 unsigned long waystep = scache_size/mips_cpu.scache.ways;
294 for (way = 0; way < mips_cpu.scache.ways; way++) {
295 start = page + way*waystep;
297 cache_unroll(start,Index_Writeback_Inv_SD);
303 #endif /* !(_MIPS_MIPS64_CACHE_H) */