4 /* PAGE_SHIFT determines the page size */
6 #define PAGE_SIZE (1UL << PAGE_SHIFT)
/*
8 * Subtle: this is an int (not an unsigned long) and so it
9 * gets extended to 64 bits the way we want (i.e. with 1s). -- paulus
*/
11 #define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
14 #include <linux/config.h>
/* Base of the kernel's virtual address space; KERNELBASE is an alias for it. */
16 #define PAGE_OFFSET CONFIG_KERNEL_START
17 #define KERNELBASE PAGE_OFFSET
/*
21 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
22 * physical addressing. For now this is just the IBM PPC440.
*/
24 #ifdef CONFIG_PTE_64BIT
25 typedef unsigned long long pte_basic_t;
26 #define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */
27 #define PTE_FMT "%16Lx"
/* NOTE(review): the #else separating the 32-bit branch below is not visible
 * in this view; the two arms select 64-bit vs 32-bit PTE representations. */
29 typedef unsigned long pte_basic_t;
30 #define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
31 #define PTE_FMT "%.8lx"
34 #include <asm/system.h> /* for xmon definition */
/* BUG() variants: both report file/line via printk; the second also traps by
 * executing an illegal instruction (.long 0x0).
 * NOTE(review): the #define BUG() headers and the surrounding #ifdef
 * (presumably CONFIG_XMON vs. not) are not visible in this view — confirm. */
38 printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
43 printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
44 __asm__ __volatile__(".long 0x0"); \
47 #define PAGE_BUG(page) do { BUG(); } while (0)
49 #define STRICT_MM_TYPECHECKS
51 #ifdef STRICT_MM_TYPECHECKS
/*
53 * These are used to make use of C type-checking..
*/
55 typedef struct { pte_basic_t pte; } pte_t;
56 typedef struct { unsigned long pmd; } pmd_t;
57 typedef struct { unsigned long pgd; } pgd_t;
58 typedef struct { unsigned long pgprot; } pgprot_t;
/* Accessors: unwrap the single-member struct to get the raw value. */
60 #define pte_val(x) ((x).pte)
61 #define pmd_val(x) ((x).pmd)
62 #define pgd_val(x) ((x).pgd)
63 #define pgprot_val(x) ((x).pgprot)
/* Constructors: wrap a raw value in the corresponding typed struct. */
65 #define __pte(x) ((pte_t) { (x) } )
66 #define __pmd(x) ((pmd_t) { (x) } )
67 #define __pgd(x) ((pgd_t) { (x) } )
68 #define __pgprot(x) ((pgprot_t) { (x) } )
/*
72 * .. while these make it easier on the compiler
*/
74 typedef unsigned long pte_t;
75 typedef unsigned long pmd_t;
76 typedef unsigned long pgd_t;
77 typedef unsigned long pgprot_t;
/* In the non-typechecked branch all accessors/constructors are identity. */
79 #define pte_val(x) (x)
80 #define pmd_val(x) (x)
81 #define pgd_val(x) (x)
82 #define pgprot_val(x) (x)
/* NOTE(review): __pte/__pmd/__pgd for this branch (orig. lines 84-86) and the
 * #else/#endif of STRICT_MM_TYPECHECKS are not visible in this view. */
87 #define __pgprot(x) (x)
92 /* align addr on a size boundary - adjust address up if needed -- Cort */
93 #define _ALIGN(addr,size) (((addr)+(size)-1)&(~((size)-1)))
95 /* to align the pointer to the (next) page boundary */
96 #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
/* Page clear/copy primitives; defined elsewhere (presumably in arch assembly
 * or mm code — not visible here). The *_user_page variants also take the
 * user virtual address the page will be mapped at. */
98 extern void clear_page(void *page);
99 extern void copy_page(void *to, void *from);
100 extern void clear_user_page(void *page, unsigned long vaddr);
101 extern void copy_user_page(void *to, void *from, unsigned long vaddr);
103 extern unsigned long ppc_memstart;
104 extern unsigned long ppc_memoffset;
/* Two alternative definitions follow: a compile-time constant form and a
 * run-time variable form. NOTE(review): the #if/#else that selects between
 * them is not visible in this view — confirm against the full file. */
106 #define PPC_MEMSTART 0
107 #define PPC_MEMOFFSET PAGE_OFFSET
109 #define PPC_MEMSTART ppc_memstart
110 #define PPC_MEMOFFSET ppc_memoffset
113 #if defined(CONFIG_APUS) && !defined(MODULE)
114 /* map phys->virtual and virtual->phys for RAM pages */
/* Virtual-to-physical translation using an addis whose immediate is the high
 * half of -PAGE_OFFSET; the ".vtop_fixup" section presumably records the
 * instruction's address so it can be patched at run time — TODO confirm.
 * NOTE(review): several body lines of both functions are missing from this
 * view (orig. lines 116-117, 120-127, 129-130, 133-141). */
115 static inline unsigned long ___pa(unsigned long v)
118 asm volatile ("1: addis %0, %1, %2;"
119 ".section \".vtop_fixup\",\"aw\";"
124 : "b" (v), "K" (((-PAGE_OFFSET) >> 16) & 0xffff));
/* Inverse translation (physical to kernel virtual), same fixup scheme but
 * using +PAGE_OFFSET and the ".ptov_fixup" section. */
128 static inline void* ___va(unsigned long p)
131 asm volatile ("1: addis %0, %1, %2;"
132 ".section \".ptov_fixup\",\"aw\";"
137 : "b" (p), "K" (((PAGE_OFFSET) >> 16) & 0xffff));
/* Non-APUS case: plain offset arithmetic relative to PPC_MEMOFFSET.
 * NOTE(review): the #else introducing this branch is not visible here. */
142 #define ___pa(vaddr) ((vaddr)-PPC_MEMOFFSET)
143 #define ___va(paddr) ((paddr)+PPC_MEMOFFSET)
/* Public wrappers: cast to unsigned long first, and __va returns void *. */
146 #define __pa(x) ___pa((unsigned long)(x))
147 #define __va(x) ((void *)(___va((unsigned long)(x))))
149 #define MAP_PAGE_RESERVED (1<<15)
/* mem_map entry for a kernel virtual address: index by page frame number
 * relative to PAGE_OFFSET. */
150 #define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr)-PAGE_OFFSET) >> PAGE_SHIFT))
/* A page pointer is valid if its mem_map index is below max_mapnr. */
151 #define VALID_PAGE(page) (((page) - mem_map) < max_mapnr)
153 extern unsigned long get_zero_page_fast(void);
155 /* Pure 2^n version of get_order */
156 extern __inline__ int get_order(unsigned long size)
/* NOTE(review): function body truncated in this view (orig. lines 157-159
 * and 161-168); only the initial shift of (size-1) is visible. */
160 size = (size-1) >> (PAGE_SHIFT-1);
169 #endif /* __ASSEMBLY__ */
/* Default VMA flags for data mappings: read/write/exec plus the
 * corresponding VM_MAY* rights. */
171 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
172 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
174 #endif /* __KERNEL__ */
175 #endif /* _PPC_PAGE_H */