/*
 * mips64_cache.h
 *
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 *
 * ########################################################################
 *
 *  This program is free software; you can distribute it and/or modify it
 *  under the terms of the GNU General Public License (Version 2) as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * ########################################################################
 *
 * Inline assembly cache operations.
 *
 * This file is the original r4cache.c file, modified to make the cache
 * handling more generic.
 *
 * FIXME: Handle split L2 caches.
 *
 */
#ifndef _MIPS_MIPS64_CACHE_H
#define _MIPS_MIPS64_CACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
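
/*
 * Index-type line flushes.  The index cache ops select a line by cache
 * index rather than by address hit, so each helper below repeats the op
 * once per way, stepping the address by one way's worth of cache
 * (cache size / number of ways) to reach the same index in every way.
 */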
static inline void flush_icache_line_indexed(unsigned long addr)
{
        unsigned long waystep = icache_size/mips_cpu.icache.ways;
        unsigned int way;

        for (way = 0; way < mips_cpu.icache.ways; way++)
        {
                __asm__ __volatile__(
                        ".set noreorder\n\t"
                        "cache %1, (%0)\n\t"
                        ".set reorder"
                        :
                        : "r" (addr),
                        "i" (Index_Invalidate_I));

                addr += waystep;
        }
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
        unsigned long waystep = dcache_size/mips_cpu.dcache.ways;
        unsigned int way;

        for (way = 0; way < mips_cpu.dcache.ways; way++)
        {
                __asm__ __volatile__(
                        ".set noreorder\n\t"
                        "cache %1, (%0)\n\t"
                        ".set reorder"
                        :
                        : "r" (addr),
                        "i" (Index_Writeback_Inv_D));

                addr += waystep;
        }
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
        unsigned long waystep = scache_size/mips_cpu.scache.ways;
        unsigned int way;

        for (way = 0; way < mips_cpu.scache.ways; way++)
        {
                __asm__ __volatile__(
                        ".set noreorder\n\t"
                        "cache %1, (%0)\n\t"
                        ".set reorder"
                        :
                        : "r" (addr),
                        "i" (Index_Writeback_Inv_SD));

                addr += waystep;
        }
}
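
/*
 * Hit-type single-line operations.  These act only on the line (if any)
 * that currently holds the given address, so no way iteration is needed.
 */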
static inline void flush_icache_line(unsigned long addr)
{
        __asm__ __volatile__(
                ".set noreorder\n\t"
                "cache %1, (%0)\n\t"
                ".set reorder"
                :
                : "r" (addr),
                  "i" (Hit_Invalidate_I));
}

static inline void flush_dcache_line(unsigned long addr)
{
        __asm__ __volatile__(
                ".set noreorder\n\t"
                "cache %1, (%0)\n\t"
                ".set reorder"
                :
                : "r" (addr),
                  "i" (Hit_Writeback_Inv_D));
}

static inline void invalidate_dcache_line(unsigned long addr)
{
        __asm__ __volatile__(
                ".set noreorder\n\t"
                "cache %1, (%0)\n\t"
                ".set reorder"
                :
                : "r" (addr),
                  "i" (Hit_Invalidate_D));
}

static inline void invalidate_scache_line(unsigned long addr)
{
        __asm__ __volatile__(
                ".set noreorder\n\t"
                "cache %1, (%0)\n\t"
                ".set reorder"
                :
                : "r" (addr),
                  "i" (Hit_Invalidate_SD));
}

static inline void flush_scache_line(unsigned long addr)
{
        __asm__ __volatile__(
                ".set noreorder\n\t"
                "cache %1, (%0)\n\t"
                ".set reorder"
                :
                : "r" (addr),
                  "i" (Hit_Writeback_Inv_SD));
}

/*
 * The next two are for badland addresses like signal trampolines.
 * They may be handed an address that is not actually mapped, so the
 * cache op sits between the 1: and 2: labels with an __ex_table fixup
 * entry; if it takes an address error or TLB fault, the exception
 * handler resumes at 2: and the op is silently skipped.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
        __asm__ __volatile__(
                ".set noreorder\n\t"
                "1:\tcache %1,(%0)\n"
                "2:\t.set reorder\n\t"
                ".section\t__ex_table,\"a\"\n\t"
                ".dword\t1b,2b\n\t"
                ".previous"
                :
                : "r" (addr), "i" (Hit_Invalidate_I));
}

static inline void protected_writeback_dcache_line(unsigned long addr)
{
        __asm__ __volatile__(
                ".set noreorder\n\t"
                "1:\tcache %1,(%0)\n"
                "2:\t.set reorder\n\t"
                ".section\t__ex_table,\"a\"\n\t"
                ".dword\t1b,2b\n\t"
                ".previous"
                :
                : "r" (addr), "i" (Hit_Writeback_D));
}
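
/*
 * cache_unroll() issues a single cache op of the given type on the given
 * address, e.g. cache_unroll(addr, Hit_Writeback_Inv_D).  The blast_*
 * helpers below use it to walk whole caches and whole pages line by line.
 */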
#define cache_unroll(base,op)                   \
        __asm__ __volatile__(                   \
                ".set noreorder\n\t"            \
                "cache %1, (%0)\n\t"            \
                ".set reorder"                  \
                :                               \
                : "r" (base),                   \
                  "i" (op));
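
/*
 * The blast_* routines below flush an entire cache or a single page one
 * line at a time: the plain variants walk KSEG0 with index ops, the
 * _page variants use hit ops on the given virtual page, and the
 * _page_indexed variants repeat the index ops once per way.
 */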
static inline void blast_dcache(void)
{
        unsigned long start = KSEG0;
        unsigned long end = (start + dcache_size);

        while(start < end) {
                cache_unroll(start,Index_Writeback_Inv_D);
                start += dc_lsize;
        }
}

static inline void blast_dcache_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = (start + PAGE_SIZE);

        while(start < end) {
                cache_unroll(start,Hit_Writeback_Inv_D);
                start += dc_lsize;
        }
}

static inline void blast_dcache_page_indexed(unsigned long page)
{
        unsigned long start;
        unsigned long end;
        unsigned long waystep = dcache_size/mips_cpu.dcache.ways;
        unsigned int way;

        for (way = 0; way < mips_cpu.dcache.ways; way++) {
                start = page + way*waystep;
                end = start + PAGE_SIZE;
                while(start < end) {
                        cache_unroll(start,Index_Writeback_Inv_D);
                        start += dc_lsize;
                }
        }
}

static inline void blast_icache(void)
{
        unsigned long start = KSEG0;
        unsigned long end = (start + icache_size);

        while(start < end) {
                cache_unroll(start,Index_Invalidate_I);
                start += ic_lsize;
        }
}

static inline void blast_icache_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = (start + PAGE_SIZE);

        while(start < end) {
                cache_unroll(start,Hit_Invalidate_I);
                start += ic_lsize;
        }
}

static inline void blast_icache_page_indexed(unsigned long page)
{
        unsigned long start;
        unsigned long end;
        unsigned long waystep = icache_size/mips_cpu.icache.ways;
        unsigned int way;

        for (way = 0; way < mips_cpu.icache.ways; way++) {
                start = page + way*waystep;
                end = start + PAGE_SIZE;
                while(start < end) {
                        cache_unroll(start,Index_Invalidate_I);
                        start += ic_lsize;
                }
        }
}

static inline void blast_scache(void)
{
        unsigned long start = KSEG0;
        unsigned long end = KSEG0 + scache_size;

        while(start < end) {
                cache_unroll(start,Index_Writeback_Inv_SD);
                start += sc_lsize;
        }
}

static inline void blast_scache_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = page + PAGE_SIZE;

        while(start < end) {
                cache_unroll(start,Hit_Writeback_Inv_SD);
                start += sc_lsize;
        }
}

static inline void blast_scache_page_indexed(unsigned long page)
{
        unsigned long start;
        unsigned long end;
        unsigned long waystep = scache_size/mips_cpu.scache.ways;
        unsigned int way;

        for (way = 0; way < mips_cpu.scache.ways; way++) {
                start = page + way*waystep;
                end = start + PAGE_SIZE;
                while(start < end) {
                        cache_unroll(start,Index_Writeback_Inv_SD);
                        start += sc_lsize;
                }
        }
}

#endif /* !(_MIPS_MIPS64_CACHE_H) */