5 #include <linux/config.h>
7 #include <asm/virtconvert.h>
/*
 * Cache handling functions
 */
/*
 * flush_icache() -- invalidate the CPU instruction cache.
 * The 040/060 path uses its own asm sequence; older CPUs go through
 * a CACR read-modify-write.
 * NOTE(review): truncated extract -- several continuation lines of both
 * asm bodies are missing here; do not edit without the full source.
 */
#define flush_icache() \
if (CPU_IS_040_OR_060) \
__asm__ __volatile__("nop\n\t" \
__asm__ __volatile__("movec %%cacr,%0\n\t" \
/*
 * cache_clear() -- invalidate the cache for the specified memory range.
 * It starts at the physical address specified for
 * the given number of bytes.
 * @paddr: physical start address
 * @len:   length of the range in bytes
 */
extern void cache_clear(unsigned long paddr, int len);
/*
 * cache_push() -- push (write back) any dirty cache in the specified
 * memory range. It starts at the physical address specified for
 * the given number of bytes.
 * @paddr: physical start address
 * @len:   length of the range in bytes
 */
extern void cache_push(unsigned long paddr, int len);
/*
 * cache_push_v() -- push and invalidate pages in the specified user
 * virtual address range.
 * @vaddr: user virtual start address
 * @len:   length of the range in bytes
 */
extern void cache_push_v(unsigned long vaddr, int len);
/*
 * CACR command bits used by the 020/030 flush paths below.
 * NOTE(review): presumably 0x008 = clear instruction cache and
 * 0x800 = clear data cache in the 68020/030 CACR -- confirm against
 * the M68000 family programmer's reference.
 */
#define FLUSH_I_AND_D (0x00000808)
#define FLUSH_I (0x00000008)
/* This is needed whenever the virtual mapping of the current
 * process changes (comment reconstructed -- original was cut mid-sentence). */
/*
 * NOTE(review): truncated extract -- the 040/060 asm body and the
 * CACR-based fallback are both incomplete below.
 */
#define __flush_cache_all() \
if (CPU_IS_040_OR_060) \
__asm__ __volatile__("nop\n\t" \
__asm__ __volatile__("movec %%cacr,%0\n\t" \
: "di" (FLUSH_I_AND_D)); \
/*
 * __flush_cache_030() -- flush I+D caches via the CACR, guarded so it
 * only acts on 020/030 CPUs (uses FLUSH_I_AND_D).
 * NOTE(review): truncated extract -- asm body incomplete below.
 */
#define __flush_cache_030() \
if (CPU_IS_020_OR_030) { \
__asm__ __volatile__("movec %%cacr,%0\n\t" \
: "di" (FLUSH_I_AND_D)); \
/* Generic entry point: whole-cache flush, alias for __flush_cache_all(). */
#define flush_cache_all() __flush_cache_all()
/*
 * flush_cache_mm() -- cache-flush hook for an entire address space.
 * Flushes only when @mm is the currently running address space.
 * NOTE(review): body truncated in this extract (braces/action missing).
 */
extern inline void flush_cache_mm(struct mm_struct *mm)
if (mm == current->mm)
/*
 * flush_cache_range() -- cache-flush hook for a virtual address range.
 * Flushes only when @mm is the current address space.
 * NOTE(review): signature continuation and body truncated in this extract.
 */
extern inline void flush_cache_range(struct mm_struct *mm,
if (mm == current->mm)
/*
 * flush_cache_page() -- cache-flush hook for a single user page.
 * Flushes only when the vma belongs to the current address space.
 * NOTE(review): body truncated in this extract (braces/action missing).
 */
extern inline void flush_cache_page(struct vm_area_struct *vma,
unsigned long vmaddr)
if (vma->vm_mm == current->mm)
/* Push the page at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
/*
 * flush_page_to_ram(page) pushes one page out of the caches by kernel
 * virtual address; __flush_page_to_ram() does the work on the physical
 * address (__pa). 040/060 use cpushp %%bc (both caches); older CPUs
 * fall back to a CACR sequence.
 * NOTE(review): truncated extract -- both asm bodies are incomplete.
 */
#define flush_page_to_ram(page) __flush_page_to_ram((unsigned long) page_address(page))
extern inline void __flush_page_to_ram(unsigned long address)
if (CPU_IS_040_OR_060) {
__asm__ __volatile__("nop\n\t"
"cpushp %%bc,(%0)\n\t"
: : "a" (__pa((void *)address)));
__asm__ __volatile__("movec %%cacr,%0\n\t"
/*
 * Per-page flush hooks that are defined as no-ops in this header;
 * no cache work is done at this granularity here.
 */
#define flush_dcache_page(pg)			do { } while (0)
#define flush_icache_page(v, pg)		do { } while (0)
#define flush_icache_user_range(v, pg, a, l)	do { } while (0)
/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
/*
 * flush_icache_range() -- make instruction fetches see newly written
 * code in [address, endaddr). On 040/060 this works page by page,
 * pushing both caches by physical address (virt_to_phys + cpushp).
 * NOTE(review): truncated extract -- the loop structure and the
 * 020/030 CACR path are incomplete below.
 */
extern inline void flush_icache_range (unsigned long address,
unsigned long endaddr)
if (CPU_IS_040_OR_060) {
short n = (endaddr - address + PAGE_SIZE - 1) / PAGE_SIZE; /* NOTE(review): 'short' caps the page count at 32767 (~128MB with 4K pages) -- confirm intended */
__asm__ __volatile__("nop\n\t"
"cpushp %%bc,(%0)\n\t"
: : "a" (virt_to_phys((void *)address)));
address += PAGE_SIZE;
__asm__ __volatile__("movec %%cacr,%0\n\t"
162 #include <asm/sun3_pgalloc.h>
164 #include <asm/motorola_pgalloc.h>
167 #endif /* M68K_PGALLOC_H */