1 #ifndef _MOTOROLA_PGALLOC_H
2 #define _MOTOROLA_PGALLOC_H
/*
 * Per-arch quicklist cache of freed page-table fragments.  Both lists
 * are LIFO: the first word of each cached table stores the address of
 * the next one (see free_pte_fast()/free_pmd_fast() below).
 * NOTE(review): this extract is missing lines (the embedded numbering
 * jumps); the struct's closing brace and the instance declaration
 * ("} quicklists;") are not visible here -- confirm against full source.
 */
4 extern struct pgtable_cache_struct {
5 unsigned long *pmd_cache;
6 unsigned long *pte_cache;
7 /* This counts in units of pointer tables, of which can be eight per page. */
8 unsigned long pgtable_cache_sz;
/*
 * Quicklist accessors.  There is no separate pgd quicklist: the pgd
 * cache is folded into the pmd cache (see the comment further down),
 * so pgd_quicklist is a constant NULL.
 */
11 #define pgd_quicklist ((unsigned long *)0)
12 #define pmd_quicklist (quicklists.pmd_cache)
13 #define pte_quicklist (quicklists.pte_cache)
14 /* This isn't accurate because of fragmentation of allocated pages for
15 pointer tables, but that should not be a problem. */
16 #define pgtable_cache_size ((quicklists.pgtable_cache_sz+7)/8)
/* Slow-path allocators -- defined elsewhere (presumably the arch mm
 * code; not visible in this header). */
18 extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset);
19 extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset);
/* Pointer-table page manager: hands out and reclaims the sub-page
 * pointer tables used for pmds (and pgds). */
21 extern pmd_t *get_pointer_table(void);
22 extern int free_pointer_table(pmd_t *);
/*
 * Flush the ATC (TLB) entry for a single kernel-space page.
 * NOTE(review): several lines are missing from this extract (embedded
 * numbering jumps 28->30 and 30->36).  The '040/'060 branch apparently
 * saves the current address-space setting via get_fs() around the
 * flush -- confirm against the full source.
 */
25 extern inline void flush_tlb_kernel_page(unsigned long addr)
27 if (CPU_IS_040_OR_060) {
28 mm_segment_t old_fs = get_fs();
30 __asm__ __volatile__(".chip 68040\n\t"
/* pre-'040 CPUs: pflush the matching entry directly */
36 __asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
/*
 * Pop a pte table off the quicklist.  Presumably returns NULL when the
 * list is empty so callers fall back to the slow path -- the function's
 * braces, the declaration of `ret` and the empty-list check are missing
 * from this extract (numbering jumps 40->46->48).
 */
40 extern inline pte_t *get_pte_fast(void)
/* advance head: first word of the cached table links to the next one */
46 pte_quicklist = (unsigned long *)*ret;
/* a page-sized pte table accounts for 8 pointer-table units (see the
 * struct comment above: eight pointer tables per page) */
48 quicklists.pgtable_cache_sz -= 8;
52 #define pte_alloc_one_fast(mm,addr) get_pte_fast()
/*
 * Allocate a fresh pte-table page.  The new page is pushed out of the
 * data cache, its stale ATC entry flushed, and then marked
 * cache-inhibited via nocache_page() -- presumably because the MMU
 * table walk must see table updates without cache interference; confirm
 * against the full source.
 * NOTE(review): the allocation-failure check and return statement are
 * missing from this extract (numbering jumps 58->61 and ends at 63).
 */
54 static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
58 pte = (pte_t *) __get_free_page(GFP_KERNEL);
61 __flush_page_to_ram((unsigned long)pte);
62 flush_tlb_kernel_page((unsigned long)pte);
63 nocache_page((unsigned long)pte);
/* Allocate a pmd: pmds come straight from the pointer-table manager.
 * (Braces missing from this extract -- lossy listing.) */
70 extern __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
72 return get_pointer_table();
/*
 * Push a pte table onto the quicklist: the table's first word is reused
 * to store the old list head (LIFO link).
 */
76 extern inline void free_pte_fast(pte_t *pte)
78 *(unsigned long *)pte = (unsigned long)pte_quicklist;
79 pte_quicklist = (unsigned long *)pte;
/* page-sized pte table = 8 pointer-table accounting units */
80 quicklists.pgtable_cache_sz += 8;
/* Really release a pte page: make it cacheable again (undoing the
 * nocache_page() done in pte_alloc_one()), then free it. */
83 extern inline void free_pte_slow(pte_t *pte)
85 cache_page((unsigned long)pte);
86 free_page((unsigned long) pte);
/*
 * Pop a pmd pointer table off the quicklist; mirrors get_pte_fast() but
 * a pmd table is a single pointer-table unit, hence the plain decrement.
 * NOTE(review): braces, the `ret` declaration and the empty-list check
 * are missing from this extract (numbering jumps 89->95->97).
 */
89 extern inline pmd_t *get_pmd_fast(void)
95 pmd_quicklist = (unsigned long *)*ret;
97 quicklists.pgtable_cache_sz--;
101 #define pmd_alloc_one_fast(mm,addr) get_pmd_fast()
/* Push a pmd table onto the quicklist; LIFO link through the table's
 * first word, counted as one pointer-table unit. */
103 extern inline void free_pmd_fast(pmd_t *pmd)
105 *(unsigned long *)pmd = (unsigned long)pmd_quicklist;
106 pmd_quicklist = (unsigned long *) pmd;
107 quicklists.pgtable_cache_sz++;
/* Really release a pmd: hand the pointer table back to its manager.
 * Returns whatever free_pointer_table() reports. */
110 extern inline int free_pmd_slow(pmd_t *pmd)
112 return free_pointer_table(pmd);
115 /* The pgd cache is folded into the pmd cache, so these are dummy routines. */
/* NOTE(review): the bodies of these three dummies are missing from this
 * extract -- presumably "return NULL" / empty no-ops; confirm. */
116 extern inline pgd_t *get_pgd_fast(void)
121 extern inline void free_pgd_fast(pgd_t *pgd)
125 extern inline void free_pgd_slow(pgd_t *pgd)
/* Diagnostic helpers for corrupt table entries -- defined elsewhere. */
129 extern void __bad_pte(pmd_t *pmd);
130 extern void __bad_pmd(pgd_t *pgd);
/* NOTE(review): bodies missing from this extract (numbering jumps
 * 132->137->143) -- presumably these just call the *_fast variants
 * above; confirm against the full source. */
132 extern inline void pte_free(pte_t *pte)
137 extern inline void pmd_free(pmd_t *pmd)
/*
 * Kernel-space variants: identical machinery, but allocation goes
 * against init_mm.  NOTE(review): the bodies of the two free_*_kernel
 * functions are missing from this extract.
 */
143 extern inline void pte_free_kernel(pte_t *pte)
148 extern inline pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address)
150 return pte_alloc(&init_mm,pmd, address);
153 extern inline void pmd_free_kernel(pmd_t *pmd)
158 extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
160 return pmd_alloc(&init_mm,pgd, address);
/* pgds live in the pmd cache (see above), so freeing one is just a
 * free_pmd_fast() with a cast. */
163 extern inline void pgd_free(pgd_t *pgd)
165 free_pmd_fast((pmd_t *)pgd);
/*
 * Allocate a pgd: try the (shared pmd) quicklist first, fall back to
 * the pointer-table manager.  NOTE(review): the braces, the NULL check
 * between the two allocations and the return statement are missing
 * from this extract (numbering jumps 170->172).
 */
168 extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
170 pgd_t *pgd = (pgd_t *)get_pmd_fast();
172 pgd = (pgd_t *)get_pointer_table();
/* Wire a newly allocated table into its parent entry; the mm argument
 * is unused on this architecture. */
177 #define pmd_populate(MM, PMD, PTE) pmd_set(PMD, PTE)
178 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
/* Trim the quicklist cache toward the given low/high water marks;
 * defined elsewhere. */
181 extern int do_check_pgt_cache(int, int);
/* NOTE(review): body missing from this extract -- presumably an empty
 * no-op on m68k (no global kernel pgd propagation needed); confirm. */
183 extern inline void set_pgdir(unsigned long address, pgd_t entry)
189 * flush all user-space atc entries.
/*
 * Flush all user-space ATC entries (see the comment fragment above).
 * NOTE(review): the '040/'060 asm body after ".chip 68040" is missing
 * from this extract (numbering jumps 194->198).
 */
191 static inline void __flush_tlb(void)
193 if (CPU_IS_040_OR_060)
194 __asm__ __volatile__(".chip 68040\n\t"
/* pre-'040 fallback: pflush with user function-code selection */
198 __asm__ __volatile__("pflush #0,#4");
/* '040/'060-specific single-entry ATC flush.  NOTE(review): the asm
 * body after ".chip 68040" (the pflush itself and the closing ".chip
 * 68k") is missing from this extract. */
201 static inline void __flush_tlb040_one(unsigned long addr)
203 __asm__ __volatile__(".chip 68040\n\t"
/* Flush the ATC entry for one user-space address, dispatching on CPU
 * generation.  (The else keyword before line 214 is missing from this
 * extract -- lossy listing.) */
209 static inline void __flush_tlb_one(unsigned long addr)
211 if (CPU_IS_040_OR_060)
212 __flush_tlb040_one(addr);
214 __asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
217 #define flush_tlb() __flush_tlb()
220 * flush all atc entries (both kernel and user-space entries).
/* NOTE(review): the '040/'060 asm body (presumably "pflusha" plus
 * ".chip 68k") is missing from this extract (numbering jumps 225->229). */
222 static inline void flush_tlb_all(void)
224 if (CPU_IS_040_OR_060)
225 __asm__ __volatile__(".chip 68040\n\t"
/* pre-'040 fallback: flush every ATC entry */
229 __asm__ __volatile__("pflusha");
/* Flush for a whole mm -- only needed if it is the currently active
 * one.  NOTE(review): the body of the if (presumably __flush_tlb()) is
 * missing from this extract. */
232 static inline void flush_tlb_mm(struct mm_struct *mm)
234 if (mm == current->mm)
/* Flush a single user page, but only when its mm is the active one --
 * inactive address spaces have no ATC entries to invalidate. */
238 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
240 if (vma->vm_mm == current->mm)
241 __flush_tlb_one(addr);
/* Range flush degrades to a full user flush when the mm is active.
 * NOTE(review): the body of the if (presumably __flush_tlb()) is
 * missing from this extract; start/end are unused in what is visible. */
244 static inline void flush_tlb_range(struct mm_struct *mm,
245 unsigned long start, unsigned long end)
247 if (mm == current->mm)
/* NOTE(review): body missing from this extract -- presumably an empty
 * no-op (page-table pages are not mapped through the ATC here); confirm. */
252 extern inline void flush_tlb_pgtables(struct mm_struct *mm,
253 unsigned long start, unsigned long end)
257 #endif /* _MOTOROLA_PGALLOC_H */