1 #ifndef _X86_64_PGALLOC_H
2 #define _X86_64_PGALLOC_H
4 #include <linux/config.h>
5 #include <asm/processor.h>
6 #include <asm/fixmap.h>
8 #include <linux/threads.h>
/* Per-CPU page-table cache accounting: bump/drop the PDA's
   pgtable_cache_sz counter as pages enter/leave the quicklists below. */
#define inc_pgcache_size() add_pda(pgtable_cache_sz,1UL)
#define dec_pgcache_size() sub_pda(pgtable_cache_sz,1UL)

/* Install a pte page into a pmd entry, or a pmd page into a pgd entry.
   __pa() turns the kernel-virtual table address into the physical address
   the hardware walks; _PAGE_TABLE supplies the access/permission bits. */
#define pmd_populate(mm, pmd, pte) \
	set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
#define pgd_populate(mm, pgd, pmd) \
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pmd)))
20 extern __inline__ pmd_t *get_pmd_slow(void)
22 return (pmd_t *)get_zeroed_page(GFP_KERNEL);
25 extern __inline__ pmd_t *get_pmd_fast(void)
29 if ((ret = read_pda(pmd_quick)) != NULL) {
30 write_pda(pmd_quick, (unsigned long *)(*ret));
34 ret = (unsigned long *)get_pmd_slow();
38 extern __inline__ void pmd_free(pmd_t *pmd)
40 *(unsigned long *)pmd = (unsigned long) read_pda(pmd_quick);
41 write_pda(pmd_quick,(unsigned long *) pmd);
45 extern __inline__ void pmd_free_slow(pmd_t *pmd)
47 if ((unsigned long)pmd & (PAGE_SIZE-1))
49 free_page((unsigned long)pmd);
52 static inline pmd_t *pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
54 unsigned long *ret = (unsigned long *)read_pda(pmd_quick);
57 write_pda(pmd_quick, (unsigned long *)(*ret));
64 static inline pmd_t *pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
66 return (pmd_t *)get_zeroed_page(GFP_KERNEL);
69 static inline pgd_t *pgd_alloc_one_fast (void)
71 unsigned long *ret = read_pda(pgd_quick);
74 write_pda(pgd_quick,(unsigned long *)(*ret));
81 static inline pgd_t *pgd_alloc (struct mm_struct *mm)
83 /* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
84 pgd_t *pgd = pgd_alloc_one_fast();
87 pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
91 static inline void pgd_free (pgd_t *pgd)
93 *(unsigned long *)pgd = (unsigned long) read_pda(pgd_quick);
94 write_pda(pgd_quick,(unsigned long *) pgd);
99 static inline void pgd_free_slow (pgd_t *pgd)
101 if ((unsigned long)pgd & (PAGE_SIZE-1))
103 free_page((unsigned long)pgd);
107 static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
109 return (pte_t *)get_zeroed_page(GFP_KERNEL);
112 extern __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
116 if ((ret = read_pda(pte_quick)) != NULL) {
117 write_pda(pte_quick, (unsigned long *)(*ret));
124 /* Should really implement gc for free page table pages. This could be done with
125 a reference count in struct page. */
127 extern __inline__ void pte_free(pte_t *pte)
129 *(unsigned long *)pte = (unsigned long) read_pda(pte_quick);
130 write_pda(pte_quick, (unsigned long *) pte);
134 extern __inline__ void pte_free_slow(pte_t *pte)
136 if ((unsigned long)pte & (PAGE_SIZE-1))
138 free_page((unsigned long)pte);
142 extern int do_check_pgt_cache(int, int);
147 * - flush_tlb() flushes the current mm struct TLBs
148 * - flush_tlb_all() flushes all processes TLBs
 * - flush_tlb_mm(mm) flushes the specified mm context TLBs
150 * - flush_tlb_page(vma, vmaddr) flushes one page
151 * - flush_tlb_range(mm, start, end) flushes a range of pages
152 * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
/* Uniprocessor case: every flush is just a local CR3 reload / INVLPG. */
#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()
161 static inline void flush_tlb_mm(struct mm_struct *mm)
163 if (mm == current->active_mm)
167 static inline void flush_tlb_page(struct vm_area_struct *vma,
170 if (vma->vm_mm == current->active_mm)
171 __flush_tlb_one(addr);
174 static inline void flush_tlb_range(struct mm_struct *mm,
175 unsigned long start, unsigned long end)
177 if (mm == current->active_mm)
/* SMP case: flush only this CPU's TLB (cross-CPU shootdown is handled by
   the extern flush_tlb_* functions declared below). */
#define local_flush_tlb() \
	__flush_tlb()
188 extern void flush_tlb_all(void);
189 extern void flush_tlb_current_task(void);
190 extern void flush_tlb_mm(struct mm_struct *);
191 extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
193 #define flush_tlb() flush_tlb_current_task()
/* SMP range flush: no selective range invalidation — delegate to the
   full per-mm flush (which performs the cross-CPU shootdown). */
static inline void flush_tlb_range(struct mm_struct * mm, unsigned long start, unsigned long end)
{
	flush_tlb_mm(mm);
}
200 #define TLBSTATE_OK 1
201 #define TLBSTATE_LAZY 2
/* Per-CPU TLB state used for lazy TLB shootdown avoidance: which mm this
   CPU currently has loaded, and whether it is tracking it actively
   (TLBSTATE_OK) or lazily (TLBSTATE_LAZY).  Cacheline-aligned to keep
   each CPU's entry on its own line. */
struct tlb_state
{
	struct mm_struct *active_mm;
	int state;
} ____cacheline_aligned;
208 extern struct tlb_state cpu_tlbstate[NR_CPUS];
/* Called when page-table pages themselves are torn down.  Empty on
   x86-64: the architecture does not keep separate page-table entries
   cached in the TLB that would need flushing here. */
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* nothing to do */
}
219 #endif /* _X86_64_PGALLOC_H */