#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>

#include <asm/pgtable.h>
#include <asm/cache.h>

/* No trailing semicolon in the expansion, so callers can use the macro
 * as an ordinary single statement. */
#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))
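/*
 * Usage sketch (hypothetical caller, not part of this header): after the
 * kernel writes a buffer through its own mapping, the dirty lines must be
 * written back before another mapping reads them:
 *
 *	memcpy(kbuf, data, len);
 *	flush_kernel_dcache_range((unsigned long)kbuf, len);
 */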
static inline void
flush_page_to_ram(struct page *page)
{
	/* Intentionally a no-op on PA-RISC. */
}

extern void flush_cache_all_local(void);
#ifdef CONFIG_SMP
static inline void flush_cache_all(void)
{
	/* Flush the local caches and ask every other CPU to do the same. */
	smp_call_function((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
	flush_cache_all_local();
}
#else
#define flush_cache_all flush_cache_all_local
#endif

/* There is no way to flush another CPU's caches selectively, so on SMP
 * flush_cache_mm() has to flush everything everywhere. */
#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif
/* The following value needs to be tuned and probably scaled with the
 * cache size.
 */

#define FLUSH_THRESHOLD 0x80000
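/* 0x80000 bytes is 512 KB: ranges at least this large are assumed to be
 * cheaper to handle with a full cache flush than with a line-by-line
 * range flush. As the comment above says, the value is untuned. */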
static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	flush_user_dcache_range_asm(start,end);
#else
	if ((end - start) < FLUSH_THRESHOLD)
		flush_user_dcache_range_asm(start,end);
	else
		flush_data_cache();
#endif
}
static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	flush_user_icache_range_asm(start,end);
#else
	if ((end - start) < FLUSH_THRESHOLD)
		flush_user_icache_range_asm(start,end);
	else
		flush_instruction_cache();
#endif
}
static inline void
flush_cache_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	int sr3;

	if (!mm->context) {
		BUG();
		return;
	}

	/* A user range flush only works on the current address space,
	 * whose space id is held in space register 3. */
	sr3 = mfsp(3);
	if (mm->context == sr3) {
		flush_user_dcache_range(start,end);
		flush_user_icache_range(start,end);
	} else {
		flush_cache_all();
	}
}
static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int sr3;

	if (!vma->vm_mm->context) {
		BUG();
		return;
	}

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(vmaddr,vmaddr + PAGE_SIZE);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range(vmaddr,vmaddr + PAGE_SIZE);
	} else {
		/* Not the current address space: no per-page flush is
		 * possible, so flush everything. */
		if (vma->vm_flags & VM_EXEC)
			flush_cache_all();
		else
			flush_data_cache();
	}
}
static inline void flush_dcache_page(struct page *page)
{
	if (page->mapping && !page->mapping->i_mmap &&
	    !page->mapping->i_mmap_shared) {
		/* No user mapping can see the page yet: defer the flush
		 * by just marking the page dirty. */
		set_bit(PG_dcache_dirty, &page->flags);
	} else {
		flush_kernel_dcache_page(page_address(page));
	}
}
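/*
 * Note on the deferred path above: PG_dcache_dirty records that the
 * kernel dirtied a page no user mapping can currently see. The assumption
 * is that whoever later inserts a user mapping for the page checks the
 * bit and performs the postponed flush then, so pages that are never
 * mapped into user space never pay for a flush.
 */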
#define flush_icache_page(vma,page) \
	do { \
		flush_kernel_dcache_page(page_address(page)); \
		flush_kernel_icache_page(page_address(page)); \
	} while (0)

/* Arguments parenthesized and no trailing semicolon, so the macro
 * behaves like a single statement. */
#define flush_icache_user_range(vma, page, addr, len) \
	flush_user_icache_range((addr), (addr) + (len))

#define flush_icache_range(s,e) \
	do { \
		flush_kernel_dcache_range_asm(s,e); \
		flush_kernel_icache_range_asm(s,e); \
	} while (0)
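/*
 * Usage sketch (hypothetical caller, not part of this header): code that
 * writes instructions into a user page, e.g. a debugger poking in a
 * breakpoint, must push them out of the D-cache and invalidate the
 * I-cache before they can be executed:
 *
 *	memcpy(maddr, src, len);	(kernel mapping of the page)
 *	flush_icache_user_range(vma, page, addr, len);
 */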
/* TLB flushing routines.... */

extern void flush_tlb_all(void);
static inline void load_context(mm_context_t context)
{
	mtsp(context, 3);
#if SPACEID_SHIFT == 0
	mtctl(context << 1, 8);
#else
	mtctl(context >> (SPACEID_SHIFT - 1), 8);
#endif
}
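/*
 * load_context() makes `context` the current user space: mtsp() loads it
 * into space register 3, and mtctl(..., 8) loads the matching protection
 * id into cr8. The shift converts between the space id and protection id
 * encodings (an assumption based on how SPACEID_SHIFT is used here; the
 * encoding itself is not spelled out in this file).
 */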
/*
 * flush_tlb_mm()
 *
 * XXX This code is NOT valid for HP-UX compatibility processes
 * (although it will probably work 99% of the time). HP-UX
 * processes are free to play with the space ids and save them
 * over long periods of time, etc., so we have to preserve the
 * space and just flush the entire tlb. We need to check the
 * personality in order to do that, but the personality is not
 * currently being set correctly.
 *
 * Of course, Linux processes could do the same thing, but
 * we don't support that (and the compilers, dynamic linker,
 * etc. do not do that).
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == &init_mm) BUG(); /* Should never happen */

#ifdef CONFIG_SMP
	flush_tlb_all();
#else
	if (mm) {
		if (mm->context != 0)
			free_sid(mm->context);
		mm->context = alloc_sid();
		if (mm == current->active_mm)
			load_context(mm->context);
	}
#endif
}
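/*
 * Note on the non-SMP path above: rather than purging entries one at a
 * time, flush_tlb_mm() retires the mm's old space id and assigns a fresh
 * one. Stale TLB entries remain in the TLB, but they are tagged with the
 * dead sid and so can no longer match this address space.
 */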
static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/* Intentionally a no-op on PA-RISC. */
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	/* For one page, it's not worth testing the split_tlb variable */

	mtsp(vma->vm_mm->context, 1);
	pdtlb(addr);
	pitlb(addr);
}
static inline void flush_tlb_range(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)	/* XXX arbitrary, should be tuned */
		flush_tlb_all();
	else {
		mtsp(mm->context, 1);
		while (npages--) {
			pdtlb(start);
			if (split_tlb)
				pitlb(start);
			start += PAGE_SIZE;
		}
	}
}
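/*
 * The 512-page cutoff above corresponds to 2 MB with 4 KB pages: issuing
 * one purge instruction per page beyond that is assumed to cost more
 * than invalidating the whole TLB. As the XXX comment says, the number
 * is a guess rather than a measured break-even point.
 */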
static inline pgd_t *pgd_alloc_one_fast (void)
{
	return NULL; /* not implemented */
}

static inline pgd_t *pgd_alloc (struct mm_struct *mm)
{
	/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
	pgd_t *pgd = pgd_alloc_one_fast();

	if (!pgd) {
		pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
		if (pgd)
			clear_page(pgd);
	}
	return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
#ifdef __LP64__

/* Three Level Page Table Support for pmd's */

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_val(*pgd) = _PAGE_TABLE + __pa((unsigned long)pmd);
}

static inline pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	return NULL; /* no fast path; fall back to pmd_alloc_one() */
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

	if (pmd)
		clear_page(pmd);
	return pmd;
}

static inline void pmd_free(pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}
#else

/* Two Level Page Table Support for pmd's */

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so it has no extra memory associated with it.
 */

#define pmd_alloc_one_fast(mm, addr)	({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

#endif
static inline void pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
{
	pmd_val(*pmd_entry) = _PAGE_TABLE + __pa((unsigned long)pte);
}

static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	return NULL; /* no fast path; fall back to pte_alloc_one() */
}

static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);

	if (pte)
		clear_page(pte);
	return pte;
}

static inline void pte_free(pte_t *pte)
{
	free_page((unsigned long)pte);
}
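/*
 * Sketch of how these fit together (hypothetical caller, assuming the
 * generic fault path of this kernel generation): to hang a new pte page
 * off a pmd entry:
 *
 *	pte_t *pte = pte_alloc_one(mm, address);
 *	if (pte)
 *		pmd_populate(mm, pmd, pte);
 *
 * pmd_populate() stores the physical address of the pte page, tagged
 * with _PAGE_TABLE, into the pmd entry; pgd_populate() does the same one
 * level up when three-level tables are in use.
 */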
extern int do_check_pgt_cache(int, int);

#endif /* _ASM_PGALLOC_H */