/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>

#include <asm/pgtable.h>
#include <asm/cache.h>

#define flush_kernel_dcache_range(start,size) \
        flush_kernel_dcache_range_asm((start), (start)+(size))
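
/* Note the argument conversion: callers pass (start, size), but the
 * asm routine takes (start, end), so the bytes flushed are the
 * half-open range [start, start+size). For example,
 * flush_kernel_dcache_range(addr, PAGE_SIZE) writes back exactly one
 * page of kernel data cache. */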

static inline void
flush_page_to_ram(struct page *page)
{
        /* no-op on PA-RISC */
}

extern void flush_cache_all_local(void);

#ifdef CONFIG_SMP
static inline void flush_cache_all(void)
{
        smp_call_function((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
        flush_cache_all_local();
}
#else
#define flush_cache_all flush_cache_all_local
#endif

#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif

/* The following value needs to be tuned and probably scaled with the
 * cache size.
 */

#define FLUSH_THRESHOLD 0x80000
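
/* 0x80000 bytes is 512KB. Below the threshold a line-by-line range
 * flush is cheaper; above it, flushing the entire cache wins. The
 * crossover used here is a guess rather than a measured value, hence
 * the tuning note above. */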

static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
        flush_user_dcache_range_asm(start, end);
#else
        if ((end - start) < FLUSH_THRESHOLD)
                flush_user_dcache_range_asm(start, end);
        else
                flush_data_cache();
#endif
}

static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
        flush_user_icache_range_asm(start, end);
#else
        if ((end - start) < FLUSH_THRESHOLD)
                flush_user_icache_range_asm(start, end);
        else
                flush_instruction_cache();
#endif
}

static inline void
flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
        int sr3;

        if (!mm->context) {
                BUG();
                return;
        }

        sr3 = mfsp(3);
        if (mm->context == sr3) {
                flush_user_dcache_range(start, end);
                flush_user_icache_range(start, end);
        } else {
                flush_cache_all();
        }
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        int sr3;

        if (!vma->vm_mm->context) {
                BUG();
                return;
        }

        sr3 = mfsp(3);
        if (vma->vm_mm->context == sr3) {
                flush_user_dcache_range(vmaddr, vmaddr + PAGE_SIZE);
                if (vma->vm_flags & VM_EXEC)
                        flush_user_icache_range(vmaddr, vmaddr + PAGE_SIZE);
        } else {
                if (vma->vm_flags & VM_EXEC)
                        flush_cache_all();
        }
}

extern void __flush_dcache_page(struct page *page);

static inline void flush_dcache_page(struct page *page)
{
        if (page->mapping && !page->mapping->i_mmap &&
                        !page->mapping->i_mmap_shared) {
                set_bit(PG_dcache_dirty, &page->flags);
        } else {
                __flush_dcache_page(page);
        }
}
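
/* The idea above: while a page-cache page has no user mappings, the
 * expensive flush can be deferred and the page merely marked
 * PG_dcache_dirty. Whatever later maps the page into user space is
 * then responsible for testing the bit and doing the flush (on this
 * port that check belongs in the update_mmu_cache() path). */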

#define flush_icache_page(vma,page)     do { flush_kernel_dcache_page(page_address(page)); flush_kernel_icache_page(page_address(page)); } while (0)

#define flush_icache_user_range(vma, page, addr, len) \
        flush_user_icache_range((addr), (addr) + (len))

#define flush_icache_range(s,e)         do { flush_kernel_dcache_range_asm((s),(e)); flush_kernel_icache_range_asm((s),(e)); } while (0)
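
/* Illustrative use of flush_icache_range() (not code from this file;
 * dst/insns/len are placeholders): after storing instructions through
 * a kernel mapping, e.g. when loading a module, the d-cache must be
 * written back and the i-cache invalidated before the new code runs:
 *
 *      memcpy(dst, insns, len);
 *      flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */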

/* TLB flushing routines.... */

extern void flush_tlb_all(void);

static inline void load_context(mm_context_t context)
{
        mtsp(context, 3);
#if SPACEID_SHIFT == 0
        mtctl(context << 1, 8);
#else
        mtctl(context >> (SPACEID_SHIFT - 1), 8);
#endif
}
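
/* Our reading of load_context() (not the original author's comment):
 * space register 3 holds the space id used for user accesses, and
 * control register 8 is a protection-id register. The low bit of a
 * protection id is the write-disable bit, so the space id is shifted
 * to land in the id field proper with that bit clear. */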

/*
 * flush_tlb_mm()
 *
 * XXX This code is NOT valid for HP-UX compatibility processes
 * (although it will probably work 99% of the time). HP-UX
 * processes are free to play with the space ids and save them
 * over long periods of time, etc. so we have to preserve the
 * space and just flush the entire tlb. We need to check the
 * personality in order to do that, but the personality is not
 * currently being set correctly.
 *
 * Of course, Linux processes could do the same thing, but
 * we don't support that (and the compilers, dynamic linker,
 * etc. do not do that).
 */

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == &init_mm) BUG(); /* Should never happen */

#ifdef CONFIG_SMP
        flush_tlb_all();
#else
        if (mm->context != 0)
                free_sid(mm->context);
        mm->context = alloc_sid();
        if (mm == current->active_mm)
                load_context(mm->context);
#endif
}
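
/* Note that the UP path above never purges individual entries: it
 * retires the old space id and allocates a fresh one, so stale TLB
 * entries tagged with the old sid can simply never match again. This
 * is exactly the trick that breaks for processes which stash space
 * ids, as the comment above warns. */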

static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
        /* no-op on PA-RISC */
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
        unsigned long addr)
{
        /* For one page, it's not worth testing the split_tlb variable */

        mtsp(vma->vm_mm->context, 1);
        pdtlb(addr);
        pitlb(addr);
}

static inline void flush_tlb_range(struct mm_struct *mm,
        unsigned long start, unsigned long end)
{
        unsigned long npages;

        /* Round the span up to whole pages, allowing for an unaligned start. */
        npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (npages >= 512)  /* XXX arbitrary, should be tuned */
                flush_tlb_all();
        else {
                mtsp(mm->context, 1);
                if (split_tlb) {
                        while (npages--) {
                                pdtlb(start);
                                pitlb(start);
                                start += PAGE_SIZE;
                        }
                } else {
                        while (npages--) {
                                pdtlb(start);
                                start += PAGE_SIZE;
                        }
                }
        }
}
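
/* With 4KB pages the 512-page cutoff above corresponds to 2MB of
 * address space; past that, issuing pdtlb/pitlb once per page is
 * assumed to cost more than a full flush_tlb_all(). As the XXX says,
 * the number is a guess, not a measurement. */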

static inline pgd_t *pgd_alloc_one_fast (void)
{
        return NULL; /* not implemented */
}

static inline pgd_t *pgd_alloc (struct mm_struct *mm)
{
        /* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
        pgd_t *pgd = pgd_alloc_one_fast();

        if (!pgd) {
                pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
                if (pgd)
                        clear_page(pgd);        /* all entries start out "none" */
        }
        return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}

#ifdef __LP64__

/* Three Level Page Table Support for pmd's */

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        pgd_val(*pgd) = _PAGE_TABLE + __pa((unsigned long)pmd);
}

static inline pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        return NULL; /* no fast path: fall through to pmd_alloc_one() */
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

        if (pmd)
                clear_page(pmd);
        return pmd;
}

static inline void pmd_free(pmd_t *pmd)
{
        free_page((unsigned long)pmd);
}

#else

/* Two Level Page Table Support for pmd's */

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */

#define pmd_alloc_one_fast(mm, addr)    ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, addr)         ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)                     do { } while (0)
#define pgd_populate(mm, pgd, pmd)      BUG()

#endif
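
/* In the two-level configuration none of the pmd stubs should ever
 * run. The distinct poison values (pmd_t *)1 and (pmd_t *)2 appear to
 * exist only so the two call sites can be told apart if a BUG() report
 * is ever followed by a dereference of the returned pointer. */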

static inline void pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
{
        pmd_val(*pmd_entry) = _PAGE_TABLE + __pa((unsigned long)pte);
}
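
/* Both populate routines store the physical address of the next-level
 * table with the _PAGE_TABLE bits added in. The table is page-aligned
 * and _PAGE_TABLE only occupies low bits, so the addition is
 * effectively an OR of flag bits into the entry. */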

static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        return NULL; /* no fast path: fall through to pte_alloc_one() */
}

static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);

        if (pte)
                clear_page(pte);
        return pte;
}

static inline void pte_free(pte_t *pte)
{
        free_page((unsigned long)pte);
}

extern int do_check_pgt_cache(int, int);