2 * reorganization around 2.3.39, routines moved from sun3_pgtable.h
4 * moved 1/26/2000 Sam Creasey
7 #ifndef _SUN3_PGALLOC_H
8 #define _SUN3_PGALLOC_H
/* Pagetable caches. */
//todo: should implement for at least ptes. --m
/* No quicklist caching is implemented on sun3 yet: all three quicklist
 * heads are permanently NULL and the cache is reported empty, so every
 * allocation takes the slow path. */
#define pgd_quicklist ((unsigned long *) 0)
#define pmd_quicklist ((unsigned long *) 0)
#define pte_quicklist ((unsigned long *) 0)
#define pgtable_cache_size (0L)
17 /* Allocation and deallocation of various flavours of pagetables. */
18 extern inline int free_pmd_fast (pmd_t *pmdp) { return 0; }
19 extern inline int free_pmd_slow (pmd_t *pmdp) { return 0; }
20 extern inline pmd_t *get_pmd_fast (void) { return (pmd_t *) 0; }
//todo: implement the following properly.
/* pte quicklist stubs: the "fast" allocation always misses (returns
 * NULL) so callers fall back to the slow path; freeing to the cache
 * expands to nothing. */
#define get_pte_fast() ((pte_t *) 0)
#define get_pte_slow pte_alloc
#define free_pte_fast(pte)
#define free_pte_slow pte_free
/* FIXME - when we get this compiling */
/* erm, now that it's compiling, what do we do with it? */
/* No extra protection bits are folded into kernel page-table entries
 * on sun3: the pmd value is just the physical address of the table. */
#define _KERNPG_TABLE 0
/* Release a kernel page-table page previously allocated with
 * get_free_page(). */
extern inline void pte_free_kernel(pte_t * pte)
	free_page((unsigned long) pte);
/* Format string used below to report a corrupt pmd (defined in arch
 * code elsewhere). */
extern const char bad_pmd_string[];

/* Look up — allocating on demand — the kernel pte for `address' under
 * `pmd'.  Returns a pointer to the pte slot, or a slot inside
 * BAD_PAGETABLE when the pmd is found to be bad. */
extern inline pte_t * pte_alloc_kernel(pmd_t * pmd, unsigned long address)
	/* Reduce the address to a pte index within one table page. */
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		pte_t * page = (pte_t *) get_free_page(GFP_KERNEL);
			/* Install the freshly allocated table into the pmd. */
			pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
			return page + address;
		/* NOTE(review): surrounding control flow (allocation failure /
		 * lost-race handling) is not visible in this excerpt. */
		pmd_val(*pmd) = _KERNPG_TABLE + __pa((unsigned long)BAD_PAGETABLE);
		free_page((unsigned long) page);
	/* Bad pmd: complain and point it at the shared BAD_PAGETABLE. */
	printk(bad_pmd_string, pmd_val(*pmd));
	printk("at kernel pgd off %08x\n", (unsigned int)pmd);
	pmd_val(*pmd) = _KERNPG_TABLE + __pa((unsigned long)BAD_PAGETABLE);
	return (pte_t *) __pmd_page(*pmd) + address;
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
/* Nothing to release: the pmd occupies no storage of its own. */
extern inline void pmd_free_kernel(pmd_t * pmd)
/* The 1-entry pmd is embedded in the pgd, so "allocation" just hands
 * back the embedded slot for `address'. */
extern inline pmd_t * pmd_alloc_kernel(pgd_t * pgd, unsigned long address)

/* 2-level tables: user pmd allocation must never happen.  The distinct
 * non-NULL poison values (1 vs 2) tag which macro fired if the BUG()
 * report is followed by a dereference. */
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
/* Free a user page-table page back to the page allocator. */
extern inline void pte_free(pte_t * pte)
	free_page((unsigned long) pte);
/* Allocate one zeroed page-table page for `mm'.
 * NOTE(review): the allocation-failure check sits on lines not visible
 * in this excerpt — confirm NULL is handled before the memset. */
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
	unsigned long page = __get_free_page(GFP_KERNEL);
	/* A new table must start with every entry invalid. */
	memset((void *)page, 0, PAGE_SIZE);
//	pmd_val(*pmd) = SUN3_PMD_MAGIC + __pa(page);
	/* pmd_val(*pmd) = __pa(page); */
	return (pte_t *) (page);
/* No pte cache: the "fast" allocation is just the normal one. */
#define pte_alloc_one_fast(mm,addr) pte_alloc_one(mm,addr)

/* Point a pmd entry at the physical address of a pte table page. */
#define pmd_populate(mm, pmd, pte) (pmd_val(*pmd) = __pa((unsigned long)pte))
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
/* Nothing to release: the pmd is embedded in the pgd page. */
extern inline void pmd_free(pmd_t * pmd)
/* Release the page holding a process page directory. */
extern inline void pgd_free(pgd_t * pgd)
	free_page((unsigned long) pgd);
/* Allocate a fresh pgd for `mm': copy the kernel half from
 * swapper_pg_dir, then zero the user portion below PAGE_OFFSET. */
extern inline pgd_t * pgd_alloc(struct mm_struct *mm)
	new_pgd = (pgd_t *)get_free_page(GFP_KERNEL);
	memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
	/* NOTE(review): this clears (PAGE_OFFSET >> PGDIR_SHIFT) *bytes*,
	 * which equals that many pgd slots only if each entry is one byte
	 * — verify against the sun3 pgd entry size. */
	memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT));
/* 2-level tables: the pgd is fully set up at pgd_alloc time, so
 * populating it later is a bug. */
#define pgd_populate(mm, pmd, pte) BUG()

/* FIXME: the sun3 doesn't have a page table cache!
   (but the motorola routine should just return 0) */
extern int do_check_pgt_cache(int, int);

/* Propagate a kernel pgd entry to all page directories.
 * NOTE(review): the body is not visible here — presumably empty on
 * sun3; confirm before relying on it. */
extern inline void set_pgdir(unsigned long address, pgd_t entry)
/* Reserved PMEGs. */
/* Per-PMEG bookkeeping tables (defined elsewhere): reserved flags, the
 * virtual address each PMEG maps, its allocation state, and the MMU
 * context it currently belongs to. */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];
/* Flush all userspace mappings one by one... (why no flush command,
/* Invalidate every user segment mapping in every MMU context, then
 * reset the bookkeeping for the user PMEGs that were in use. */
static inline void flush_tlb_all(void)
	unsigned char ctx, oldctx;

	oldctx = sun3_get_context();
	/* Walk every user segment in each of the 8 contexts, unmapping it. */
	for(addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
		for(ctx = 0; ctx < 8; ctx++) {
			sun3_put_context(ctx);
			sun3_put_segmap(addr, SUN3_INVALID_PMEG);
	/* Return to the context we were entered in. */
	sun3_put_context(oldctx);
	/* erase all of the userspace pmeg maps, we've clobbered them
	for(addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
		if(pmeg_alloc[addr] == 1) {
			pmeg_alloc[addr] = 0;
			pmeg_vaddr[addr] = 0;
/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm (struct mm_struct *mm)
	unsigned char oldctx;

	/* Temporarily switch the MMU to the target mm's context. */
	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

	/* Unmap each segment that currently has a valid PMEG. */
	for(i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
		seg = sun3_get_segmap(i);
		if(seg == SUN3_INVALID_PMEG)
		sun3_put_segmap(i, SUN3_INVALID_PMEG);

	/* Restore the caller's context. */
	sun3_put_context(oldctx);
/* Flush a single TLB page. In this case, we're limited to flushing a
/* Invalidate the mapping that covers `addr' in vma's context.  The
 * sun3 MMU works in whole segments, so the entire enclosing segment
 * is unmapped, not just one page. */
static inline void flush_tlb_page (struct vm_area_struct *vma,
	unsigned char oldctx;

	oldctx = sun3_get_context();
	sun3_put_context(vma->vm_mm->context);
	/* Round down to the start of the enclosing segment. */
	addr &= ~SUN3_PMEG_MASK;
	if((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG)
		sun3_put_segmap (addr, SUN3_INVALID_PMEG);
	sun3_put_context(oldctx);
/* Flush a range of pages from TLB. */
/* Walk [start, end) a segment at a time, invalidating each segment
 * owned by mm's context.  (2.4-era signature: takes the mm directly.) */
static inline void flush_tlb_range (struct mm_struct *mm,
		    unsigned long start, unsigned long end)
	unsigned char seg, oldctx;

	/* Segments are the flush granularity: align start downwards. */
	start &= ~SUN3_PMEG_MASK;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

		if((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
		/* Only tear down PMEGs actually owned by this context. */
		if(pmeg_ctx[seg] == mm->context) {
			sun3_put_segmap(start, SUN3_INVALID_PMEG);
		start += SUN3_PMEG_SIZE;
/* Flush kernel page from TLB. */
/* Invalidate the segment mapping covering a kernel address. */
static inline void flush_tlb_kernel_page (unsigned long addr)
	sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
/* Called when page-table pages for [start, end) are torn down.
 * NOTE(review): the body is not visible here — on most ports this is
 * a no-op; confirm. */
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
		      unsigned long start, unsigned long end)
#endif /* _SUN3_PGALLOC_H */