1 /* $Id: pgtable.h,v 1.109 2001/11/13 00:49:32 davem Exp $ */
2 #ifndef _SPARC_PGTABLE_H
3 #define _SPARC_PGTABLE_H
/* asm-sparc/pgtable.h:  Defines and functions used to work
 * with Sparc page tables.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
12 #include <linux/config.h>
13 #include <linux/spinlock.h>
16 #include <asm/pgtsun4.h>
18 #include <asm/pgtsun4c.h>
20 #include <asm/pgtsrmmu.h>
21 #include <asm/vac-ops.h>
22 #include <asm/oplib.h>
24 #include <asm/btfixup.h>
25 #include <asm/system.h>
29 extern void load_mmu(void);
30 extern unsigned long calc_highpages(void);
32 BTFIXUPDEF_CALL(void, quick_kernel_fault, unsigned long)
34 #define quick_kernel_fault(addr) BTFIXUP_CALL(quick_kernel_fault)(addr)
36 /* Routines for data transfer buffers. */
37 BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
38 BTFIXUPDEF_CALL(void, mmu_unlockarea, char *, unsigned long)
40 #define mmu_lockarea(vaddr,len) BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
41 #define mmu_unlockarea(vaddr,len) BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)
/* These are implementations for sbus_map_sg/sbus_unmap_sg... collapse later */
/* NOTE(review): implementations are per-MMU (iommu/iounit), selected at boot. */
BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, char *, unsigned long, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void, mmu_get_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void, mmu_release_scsi_one, __u32, unsigned long, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void, mmu_release_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)

#define mmu_get_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_get_scsi_one)(vaddr,len,sbus)
#define mmu_get_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_get_scsi_sgl)(sg,sz,sbus)
#define mmu_release_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
#define mmu_release_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)
/*
 * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
 */
BTFIXUPDEF_CALL(void, mmu_map_dma_area, unsigned long va, __u32 addr, int len)
BTFIXUPDEF_CALL(unsigned long /*phys*/, mmu_translate_dvma, unsigned long busa)
BTFIXUPDEF_CALL(void, mmu_unmap_dma_area, unsigned long busa, int len)

#define mmu_map_dma_area(va, ba,len) BTFIXUP_CALL(mmu_map_dma_area)(va,ba,len)
#define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
#define mmu_translate_dvma(ba) BTFIXUP_CALL(mmu_translate_dvma)(ba)
/* pmd geometry; constants are patched into the image at boot (btfixup). */
BTFIXUPDEF_SIMM13(pmd_shift)
BTFIXUPDEF_SETHI(pmd_size)
BTFIXUPDEF_SETHI(pmd_mask)
69 extern unsigned int pmd_align(unsigned int addr) __attribute__((const));
70 extern __inline__ unsigned int pmd_align(unsigned int addr)
72 return ((addr + ~BTFIXUP_SETHI(pmd_mask)) & BTFIXUP_SETHI(pmd_mask));
/* pgdir geometry; constants are patched into the image at boot (btfixup). */
BTFIXUPDEF_SIMM13(pgdir_shift)
BTFIXUPDEF_SETHI(pgdir_size)
BTFIXUPDEF_SETHI(pgdir_mask)
79 extern unsigned int pgdir_align(unsigned int addr) __attribute__((const));
80 extern __inline__ unsigned int pgdir_align(unsigned int addr)
82 return ((addr + ~BTFIXUP_SETHI(pgdir_mask)) & BTFIXUP_SETHI(pgdir_mask));
/* Page-table fanout; differs between sun4c and srmmu, so boot-patched. */
BTFIXUPDEF_SIMM13(ptrs_per_pte)
BTFIXUPDEF_SIMM13(ptrs_per_pmd)
BTFIXUPDEF_SIMM13(ptrs_per_pgd)
BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)

#define VMALLOC_VMADDR(x) ((unsigned long)(x))

/* Report corrupt table entries by trapping. */
#define pte_ERROR(e) __builtin_trap()
#define pmd_ERROR(e) __builtin_trap()
#define pgd_ERROR(e) __builtin_trap()

/* Raw protection-bit encodings for the pgprot macros below; boot-patched. */
BTFIXUPDEF_INT(page_none)
BTFIXUPDEF_INT(page_shared)
BTFIXUPDEF_INT(page_copy)
BTFIXUPDEF_INT(page_readonly)
BTFIXUPDEF_INT(page_kernel)
/* Public names for the boot-patched geometry and protection values. */
#define PMD_SHIFT BTFIXUP_SIMM13(pmd_shift)
#define PMD_SIZE BTFIXUP_SETHI(pmd_size)
#define PMD_MASK BTFIXUP_SETHI(pmd_mask)
#define PMD_ALIGN(addr) pmd_align(addr)
#define PGDIR_SHIFT BTFIXUP_SIMM13(pgdir_shift)
#define PGDIR_SIZE BTFIXUP_SETHI(pgdir_size)
#define PGDIR_MASK BTFIXUP_SETHI(pgdir_mask)
/* Was object-like expanding to pgdir_align(addr), silently capturing any
 * 'addr' in the caller's scope; made function-like to match PMD_ALIGN.
 */
#define PGDIR_ALIGN(addr) pgdir_align(addr)
#define PTRS_PER_PTE BTFIXUP_SIMM13(ptrs_per_pte)
#define PTRS_PER_PMD BTFIXUP_SIMM13(ptrs_per_pmd)
#define PTRS_PER_PGD BTFIXUP_SIMM13(ptrs_per_pgd)
#define USER_PTRS_PER_PGD BTFIXUP_SIMM13(user_ptrs_per_pgd)
#define FIRST_USER_PGD_NR 0

#define PAGE_NONE __pgprot(BTFIXUP_INT(page_none))
#define PAGE_SHARED __pgprot(BTFIXUP_INT(page_shared))
#define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
#define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
/* Two unconditional definitions of PAGE_KERNEL conflicted here; restore the
 * module/non-module split: modules cannot use the btfixup'ed constant
 * directly and must go through the exported variable instead.
 */
#ifdef MODULE
extern unsigned long page_kernel;

#define PAGE_KERNEL page_kernel
#else
#define PAGE_KERNEL __pgprot(BTFIXUP_INT(page_kernel))
#endif
/* Top-level page directory */
extern pgd_t swapper_pg_dir[1024];

/* Page table for 0-4MB for everybody, on the Sparc this
 * holds the same as on the i386.
 */
extern pte_t pg0[1024];
extern pte_t pg1[1024];
extern pte_t pg2[1024];
extern pte_t pg3[1024];

extern unsigned long ptr_in_current_pgd;
/* Here is a trick, since mmap.c need the initializer elements for
 * protection_map[] to be constant at compile time, I set the following
 * to all zeros. I set it to the real values after I link in the
 * appropriate MMU page table routines at boot time.
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)
extern int num_contexts;

/* First physical page can be anywhere, the following is needed so that
 * va-->pa and vice versa conversions work properly without performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t * __bad_pagetable(void);
extern pte_t __bad_page(void);
extern unsigned long empty_zero_page;

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
/* mem_map entry of the zero page: pa(empty_zero_page) >> PAGE_SHIFT */
#define ZERO_PAGE(vaddr) (mem_map + (((unsigned long)&empty_zero_page - PAGE_OFFSET + phys_base) >> PAGE_SHIFT))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK (~(sizeof(void*)-1))

#define SIZEOF_PTR_LOG2 2
BTFIXUPDEF_CALL_CONST(unsigned long, pmd_page, pmd_t)
BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page, pgd_t)

#define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
#define pgd_page(pgd) BTFIXUP_CALL(pgd_page)(pgd)

/* none_mask: bits ignored by the pte_none()/pmd_none() tests below. */
BTFIXUPDEF_SETHI(none_mask)
BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
206 extern __inline__ int pte_none(pte_t pte)
208 return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask));
#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
#define pte_clear(pte) BTFIXUP_CALL(pte_clear)(pte)

/* pmd tests/clear; per-MMU implementations selected at boot. */
BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
218 extern __inline__ int pmd_none(pmd_t pmd)
220 return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask));
#define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
#define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd)
#define pmd_clear(pmd) BTFIXUP_CALL(pmd_clear)(pmd)

/* pgd tests/clear; per-MMU implementations selected at boot. */
BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t)
BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t)
BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t)
BTFIXUPDEF_CALL(void, pgd_clear, pgd_t *)

#define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd)
#define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd)
#define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd)
#define pgd_clear(pgd) BTFIXUP_CALL(pgd_clear)(pgd)
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
BTFIXUPDEF_HALF(pte_writei)
BTFIXUPDEF_HALF(pte_dirtyi)
BTFIXUPDEF_HALF(pte_youngi)
245 extern int pte_write(pte_t pte) __attribute__((const));
246 extern __inline__ int pte_write(pte_t pte)
248 return pte_val(pte) & BTFIXUP_HALF(pte_writei);
251 extern int pte_dirty(pte_t pte) __attribute__((const));
252 extern __inline__ int pte_dirty(pte_t pte)
254 return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
257 extern int pte_young(pte_t pte) __attribute__((const));
258 extern __inline__ int pte_young(pte_t pte)
260 return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
/* Bit masks cleared by pte_wrprotect/pte_mkclean/pte_mkold below. */
BTFIXUPDEF_HALF(pte_wrprotecti)
BTFIXUPDEF_HALF(pte_mkcleani)
BTFIXUPDEF_HALF(pte_mkoldi)
267 extern pte_t pte_wrprotect(pte_t pte) __attribute__((const));
268 extern __inline__ pte_t pte_wrprotect(pte_t pte)
270 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
273 extern pte_t pte_mkclean(pte_t pte) __attribute__((const));
274 extern __inline__ pte_t pte_mkclean(pte_t pte)
276 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
279 extern pte_t pte_mkold(pte_t pte) __attribute__((const));
280 extern __inline__ pte_t pte_mkold(pte_t pte)
282 return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
/* Setter counterparts of the bit tests above; per-MMU, boot-patched. */
BTFIXUPDEF_CALL_CONST(pte_t, pte_mkwrite, pte_t)
BTFIXUPDEF_CALL_CONST(pte_t, pte_mkdirty, pte_t)
BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)

#define pte_mkwrite(pte) BTFIXUP_CALL(pte_mkwrite)(pte)
#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)

#define page_pte_prot(page, prot) mk_pte(page, prot)
#define page_pte(page) page_pte_prot(page, __pgprot(0))

BTFIXUPDEF_CALL(struct page *, pte_page, pte_t)
#define pte_page(pte) BTFIXUP_CALL(pte_page)(pte)

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, struct page *, pgprot_t)

BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)

#define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
#define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
#define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)

/* Store a lower-level table pointer into a pgd/pmd entry. */
BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)

#define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
#define pmd_set(pmdp,ptep) BTFIXUP_CALL(pmd_set)(pmdp,ptep)

/* Bits preserved by pte_modify() (page frame and type bits). */
BTFIXUPDEF_INT(pte_modify_mask)
320 extern pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute__((const));
321 extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
323 return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
324 pgprot_val(newprot));
#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
BTFIXUPDEF_CALL(pte_t *, pte_offset, pmd_t *, unsigned long)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)

/* Find an entry in the third-level page table.. */
#define pte_offset(dir,addr) BTFIXUP_CALL(pte_offset)(dir,addr)

/* The permissions for pgprot_val to make a page mapped on the obio space */
extern unsigned int pg_iobits;

/* icache flushes are defined as no-ops here. */
#define flush_icache_page(vma, pg) do { } while(0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t)

#define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval)

/* Dump MMU statistics into a seq_file (for /proc). */
BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)

#define mmu_info(p) BTFIXUP_CALL(mmu_info)(p)

/* Fault handler stuff... */
#define FAULT_CODE_PROT 0x1
#define FAULT_CODE_WRITE 0x2
#define FAULT_CODE_USER 0x4

BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t)

#define update_mmu_cache(vma,addr,pte) BTFIXUP_CALL(update_mmu_cache)(vma,addr,pte)

extern int invalid_segment;

/* Encode and de-code a swap entry: type in bits 2-8, offset in bits 9-26. */
#define SWP_TYPE(x) (((x).val >> 2) & 0x7f)
#define SWP_OFFSET(x) (((x).val >> 9) & 0x3ffff)
#define SWP_ENTRY(type,offset) ((swp_entry_t) { (((type) & 0x7f) << 2) | (((offset) & 0x3ffff) << 9) })
#define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x) ((pte_t) { (x).val })
/* MMU context descriptor; lives on either the free or the used list
 * (the struct delimiters were lost in extraction and are restored here —
 * the externs below require the complete type).
 */
struct ctx_list {
        struct ctx_list *next;
        struct ctx_list *prev;
        unsigned int ctx_number;
        struct mm_struct *ctx_mm;
};
extern struct ctx_list *ctx_list_pool;  /* Dynamically allocated */
extern struct ctx_list ctx_free;        /* Head of free list */
extern struct ctx_list ctx_used;        /* Head of used contexts list */

/* Sentinel context number meaning "no hardware context assigned". */
#define NO_CONTEXT -1
395 extern __inline__ void remove_from_ctx_list(struct ctx_list *entry)
397 entry->next->prev = entry->prev;
398 entry->prev->next = entry->next;
401 extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
404 (entry->prev = head->prev)->next = entry;
/* Convenience wrappers for the two global context lists. */
#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
410 extern __inline__ unsigned long
411 __get_phys (unsigned long addr)
413 switch (sparc_cpu_model){
416 return sun4c_get_pte (addr) << PAGE_SHIFT;
419 return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
425 extern __inline__ int
426 __get_iospace (unsigned long addr)
428 switch (sparc_cpu_model){
431 return -1; /* Don't check iospace on sun4c */
434 return (srmmu_get_pte (addr) >> 28);
extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
        (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

extern int io_remap_page_range(unsigned long from, unsigned long to,
        unsigned long size, pgprot_t prot, int space);

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init() do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */