1 #ifndef __ASM_SH64_PGALLOC_H
2 #define __ASM_SH64_PGALLOC_H
4 #include <asm/processor.h>
5 #include <linux/threads.h>
6 #include <linux/slab.h>
/*
 * Page-table quicklists are not implemented on sh64: every quicklist
 * head is pinned to NULL and the cache size is reported as zero, so
 * all allocations fall through to the "slow" allocator paths below.
 */
#define pgd_quicklist ((unsigned long *)0)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist ((unsigned long *)0)
#define pgtable_cache_size 0L
/*
 * Install a pte page table into a pmd entry: store the physical
 * address of the pte page together with the _PAGE_TABLE access bits.
 * NOTE(review): a function form of pmd_populate also appears later in
 * this file; presumably the two are selected by the 2-level/3-level
 * page-table config — confirm against the full file.
 */
#define pmd_populate(mm, pmd, pte) \
	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
/*
 * Allocate and free page tables.
 */
23 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
25 unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
26 pgd_t *pgd = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
29 memset(pgd, 0, pgd_size);
34 static inline void pgd_free(pgd_t *pgd)
39 static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
41 pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);
/*
 * Fast-path pte allocation.  NOTE(review): the body is not visible in
 * this chunk; with pte_quicklist pinned to 0 above it presumably just
 * returns NULL so callers fall back to pte_alloc_one() — confirm
 * against the full file.
 */
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
52 static inline void pte_free_slow(pte_t *pte)
54 free_page((unsigned long)pte);
/* No quicklists on sh64, so freeing a pte always takes the slow path. */
#define pte_free(pte) pte_free_slow(pte)
/*
 * Point a pgd entry at a pmd table: the pmd pointer is masked to a
 * page boundary and stored directly.  NOTE(review): this stores the
 * (masked) pointer value itself rather than a __pa() physical address
 * — presumably intentional for sh64's pgd level; confirm.
 */
#define pgd_set(pgd,pmd) pgd_val(*pgd) = ( ((unsigned long)pmd) & PAGE_MASK)

#define pgd_populate(mm, pgd, pmd) pgd_set(pgd,pmd)
/*
 * Fast-path pmd allocation.  NOTE(review): the return-type line and
 * the branch bodies are elided in this chunk; with pmd_quicklist
 * pinned to 0 there can be no fast entries — confirm against the
 * full file.
 */
pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
/*
 * Slow-path pmd allocation.  NOTE(review): heavily elided in this
 * chunk; only the 3-level branch's page allocation is visible.  With
 * a 2-level layout there is no real pmd level, so that branch
 * presumably folds into the pgd — confirm against the full file.
 */
pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
	pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
/*
 * pmd_free.  NOTE(review): the signature line is elided in this chunk.
 * Only the 3-level branch is visible, which returns the pmd page to
 * the page allocator; the 2-level branch's body is not shown.
 */
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
	free_page((unsigned long)pmd);
/*
 * Function form of pmd_populate.  NOTE(review): the body is largely
 * elided in this chunk; the only remaining line is commented-out code
 * that stored the pte page's physical address into the pmd entry.
 */
pmd_populate (struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
	// pmd_val(*pmd_entry) = __pa(pte);
/* No quicklists to trim, so the page-table cache check is a no-op. */
#define do_check_pgt_cache(low, high) (0)
/*
 * TLB flushing:
 *
 * - flush_tlb() flushes the current mm struct TLBs
 * - flush_tlb_all() flushes all processes TLBs
 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(mm, start, end) flushes a range of pages
 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
129 extern void flush_tlb(void);
130 extern void flush_tlb_all(void);
131 extern void flush_tlb_mm(struct mm_struct *mm);
132 extern void flush_tlb_range(struct mm_struct *mm, unsigned long start,
134 extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
/*
 * Page-table range flush hook: a deliberate no-op on sh64, since no
 * TLB state is cached for page tables themselves here.
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* Nothing to do */
}
/*
 * These are generic functions. These will need to change once D cache
 * aliasing support has been added.
 */
145 static inline pte_t ptep_get_and_clear(pte_t *ptep)
/*
 * Following functions are same as generic ones.
 */
155 static inline int ptep_test_and_clear_young(pte_t *ptep)
160 set_pte(ptep, pte_mkold(pte));
164 static inline int ptep_test_and_clear_dirty(pte_t *ptep)
169 set_pte(ptep, pte_mkclean(pte));
173 static inline void ptep_set_wrprotect(pte_t *ptep)
175 pte_t old_pte = *ptep;
176 set_pte(ptep, pte_wrprotect(old_pte));
179 static inline void ptep_mkdirty(pte_t *ptep)
181 pte_t old_pte = *ptep;
182 set_pte(ptep, pte_mkdirty(old_pte));
184 #endif /* __ASM_SH64_PGALLOC_H */