#ifdef __KERNEL__
#ifndef _PPC_PGALLOC_H
#define _PPC_PGALLOC_H

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#ifdef CONFIG_PTE_64BIT
/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
#define PGDIR_ORDER	1
#else
#define PGDIR_ORDER	0
#endif
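/*
 * Sketch of the arithmetic behind the comment above (added, not in the
 * original header): with 8-byte PTEs a 4kB page holds 512 of them, so
 * each pgdir entry maps 512 * 4kB = 2MB, and covering the full 32-bit
 * space takes 2048 four-byte pgdir entries, i.e. 8kB -- an order-1
 * allocation.  With 4-byte PTEs the pgdir fits in one 4kB page (order 0).
 */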
/*
 * This is handled very differently on the PPC since our page tables
 * are all 0's and I want to be able to use these zero'd pages elsewhere
 * as well - it gives us quite a speedup.
 *
 * Note that the SMP/UP versions are the same, but we don't need a
 * per-cpu list of zero pages because we do the zero-ing with the cache
 * off and the access routines are lock-free.  The pgt cache stuff is
 * per-cpu, though, since it isn't done with any lock-free access
 * routines (although I think we need arch-specific routines so I can
 * do lock-free).
 *
 * I need to generalize this so we can use it for other arches as well.
 *   -- Cort
 */
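/*
 * Illustrative sketch (added, not part of the original header): each
 * "quicklist" below is a singly linked stack of free pages, threaded
 * through the first word of each page.  Push stores the old head in the
 * page and makes the page the new head; pop takes the head and follows
 * its first word to the next page:
 *
 *	*(unsigned long **)page = pgd_quicklist;	(push)
 *	pgd_quicklist = (unsigned long *)page;
 *
 *	ret = pgd_quicklist;				(pop)
 *	pgd_quicklist = (unsigned long *)(*ret);
 */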
#ifdef CONFIG_SMP
#define quicklists	cpu_data[smp_processor_id()]
#else
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;
#endif
#define pgd_quicklist		(quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(quicklists.pte_cache)
#define pgtable_cache_size	(quicklists.pgtable_cache_sz)
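/*
 * Note (added for clarity): on SMP, where quicklists is defined above as
 * cpu_data[smp_processor_id()], pgd_quicklist expands to
 * cpu_data[smp_processor_id()].pgd_cache, so each CPU refills and trims
 * its own page table cache without locking.
 */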
extern unsigned long *zero_cache;	/* head of linked list of pre-zero'd pages */
extern atomic_t zero_sz;		/* # pages currently pre-zero'd */
extern atomic_t zeropage_hits;		/* # zero'd-page requests satisfied from the list */
extern atomic_t zeropage_calls;		/* # zero'd-page requests that have been made */
extern atomic_t zerototal;		/* # pages zero'd over time */
#define zero_quicklist		(zero_cache)
#define zero_cache_sz		(zero_sz)
#define zero_cache_calls	(zeropage_calls)
#define zero_cache_hits		(zeropage_hits)
#define zero_cache_total	(zerototal)
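/*
 * Example (a sketch, not part of the original header): the counters are
 * plain atomics, so a hit rate can be reported with atomic_read(), e.g.
 *
 *	printk("zero cache: %d hits / %d calls\n",
 *	       atomic_read(&zero_cache_hits),
 *	       atomic_read(&zero_cache_calls));
 */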
/* return a pre-zero'd page from the list; returns 0 if none is available -- Cort */
extern unsigned long get_zero_page_fast(void);
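/*
 * Usage sketch (added, not part of the original header): a caller that
 * needs a zeroed page can try the pre-zero'd list first and fall back
 * to clearing a freshly allocated page:
 *
 *	unsigned long page = get_zero_page_fast();
 *	if (page == 0) {
 *		page = __get_free_page(GFP_KERNEL);
 *		if (page != 0)
 *			clear_page((void *)page);
 *	}
 */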
extern void __bad_pte(pmd_t *pmd);
extern __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret;

	/* zero the whole (possibly multi-page) pgdir */
	if ((ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER)) != NULL)
		clear_pages(ret, PGDIR_ORDER);
	return ret;
}
extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		/* pop the head; its first word links to the next cached pgdir */
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	/* push the pgdir onto the quicklist, linked through its first word */
	*(unsigned long **)pgd = pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	/* must match the order-PGDIR_ORDER allocation in get_pgd_slow() */
	free_pages((unsigned long)pgd, PGDIR_ORDER);
}
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()
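/*
 * Usage sketch (added for illustration, not part of the original header):
 * generic mm code allocates and frees pgds only through these macros, so
 * the quicklist is used transparently:
 *
 *	pgd_t *pgd = pgd_alloc(mm);	(pops the cache, or get_pgd_slow)
 *	...
 *	pgd_free(pgd);			(pushes it back onto the cache)
 */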
/*
 * We don't have any real pmd's, and this code never triggers because
 * the pgd will always be present.
 */
#define pmd_alloc_one_fast(mm, address)	({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address)	({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()
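/*
 * Note (added for clarity): PPC32 uses a two-level page table, so the
 * pmd level is folded into the pgd and pmd_offset() simply returns the
 * pgd entry it is handed.  Generic code therefore never allocates a
 * real pmd here; the BUG() calls above catch any path that tries.
 */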
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;
	extern int mem_init_done;
	extern void *early_get_page(void);

	/* before mem_init the buddy allocator isn't up, so take a page
	 * from the early boot allocator instead */
	if (mem_init_done)
		pte = (pte_t *) __get_free_page(GFP_KERNEL);
	else
		pte = (pte_t *) early_get_page();
	if (pte != NULL)
		clear_page(pte);
	return pte;
}
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;

	if ((ret = pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}
extern __inline__ void pte_free_fast(pte_t *pte)
{
	*(unsigned long **)pte = pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}
extern __inline__ void pte_free_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}
#define pte_free(pte)		pte_free_slow(pte)
#define pmd_populate(mm, pmd, pte)	(pmd_val(*(pmd)) = (unsigned long) (pte))
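/*
 * Usage sketch (added; the calling sequence is generic-mm convention,
 * not spelled out in this header): with no real pmd level, populating a
 * "pmd" just points the pgd entry at a freshly allocated pte page:
 *
 *	pte_t *pte = pte_alloc_one(mm, address);
 *	if (pte)
 *		pmd_populate(mm, pmd, pte);
 */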
extern int do_check_pgt_cache(int, int);
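/*
 * Note (added; behaviour inferred from the generic 2.4 pgt-cache scheme,
 * not stated in this header): do_check_pgt_cache(low, high) is invoked
 * from the idle loop and, when pgtable_cache_size exceeds the high
 * watermark, releases cached pages back to the page allocator until the
 * cache drops to the low watermark.
 */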
#endif /* _PPC_PGALLOC_H */
#endif /* __KERNEL__ */