/* PAGE_SHIFT determines the page size */
#define PAGE_SIZE (1UL << PAGE_SHIFT)
/* Mask that keeps the page-frame bits of an address, i.e. rounds it down
 * to a page boundary. */
#define PAGE_MASK (~(PAGE_SIZE-1))
#include <linux/config.h>

#ifdef CONFIG_X86_USE_3DNOW

/* 3DNow!-capable CPUs: use the MMX-register fast paths for whole-page
 * clears and copies. */
#define clear_page(page) mmx_clear_page((void *)(page))
#define copy_page(to,from) mmx_copy_page(to,from)

/*
 * On older X86 processors it's not a win to use MMX here it seems.
 */
/* Generic fallback: a page is just PAGE_SIZE contiguous bytes. */
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)

/* i386 has no virtually-indexed cache aliasing issues, so the user-page
 * variants can ignore the user virtual address entirely. */
#define clear_user_page(page, vaddr) clear_page(page)
#define copy_user_page(to, from, vaddr) copy_page(to, from)
/*
 * These are used to make use of C type-checking..
 */
/* PAE variant: a pte is 64 bits wide, kept as two 32-bit halves so it can
 * be manipulated with ordinary 32-bit loads/stores. */
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pmd; } pmd_t;
typedef struct { unsigned long long pgd; } pgd_t;
/* Reassemble the full 64-bit entry from its two halves. */
#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
/* Split a 64-bit value back into the { low, high } pair. */
#define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })

/* Non-PAE variant: all page-table entries are plain 32-bit words. */
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low)
#define __pte(x) ((pte_t) { (x) } )

/* Bits of a pte that address the page frame (the rest are flag bits). */
#define PTE_MASK PAGE_MASK

/* Page protection bits get their own wrapper type as well. */
typedef struct { unsigned long pgprot; } pgprot_t;

/* Accessors: unwrap the raw value from the typed struct... */
#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)

/* ...and constructors: wrap a raw value into the typed struct. */
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )

#endif /* !__ASSEMBLY__ */
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)

/*
 * This handles the memory map.. We could make this a config
 * option, but too many people screw it up, and too few need
 * it.
 *
 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 950MB.
 *
 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
 * and CONFIG_HIGHMEM64G options in the kernel configuration.
 */
/* Bare constant (no cast) so it is usable from assembly as well. */
#define __PAGE_OFFSET (0xC0000000)

/*
 * This much address space is reserved for vmalloc() and iomap()
 * as well as fixmap mappings.
 */
#define __VMALLOC_RESERVE (128 << 20)
93 * Tell the user there is some problem. Beep too, so we can
94 * see^H^H^Hhear bugs in early bootup as well!
95 * The offending file and line are encoded after the "officially
96 * undefined" opcode for parsing in the trap handler.
99 #if 1 /* Set to zero for a slightly smaller kernel */
101 __asm__ __volatile__( "ud2\n" \
104 : : "i" (__LINE__), "i" (__FILE__))
/* Smaller-kernel variant: just trap with the "officially undefined" ud2
 * opcode, without encoding file/line information after it. */
#define BUG() __asm__ __volatile__("ud2\n")
109 #define PAGE_BUG(page) do { \
113 /* Pure 2^n version of get_order */
114 static __inline__ int get_order(unsigned long size)
118 size = (size-1) >> (PAGE_SHIFT-1);
#endif /* __ASSEMBLY__ */

/* C-usable (typed) versions of the raw constants above. */
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
/* Largest amount of physical memory that fits in the direct mapping:
 * everything above PAGE_OFFSET minus the vmalloc/iomap/fixmap reserve.
 * (Unary minus on the unsigned constants computes 4GB - offset.) */
#define __MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
#define MAXMEM ((unsigned long)(-PAGE_OFFSET-VMALLOC_RESERVE))
/* Kernel virtual <-> physical translation is a fixed linear offset. */
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
/* struct page for a directly-mapped kernel virtual address. */
#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
/* NOTE(review): the pointer difference is signed but max_mapnr is
 * unsigned, so a page below mem_map converts to a huge value and is
 * (correctly) rejected — relies on the usual arithmetic conversions. */
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)

/* Default protection flags for data VMAs: everything on x86, since the
 * hardware cannot enforce execute permission separately from read. */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
	VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#endif /* __KERNEL__ */

#endif /* _I386_PAGE_H */