#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) ia-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

#include <linux/config.h>

#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/threads.h>

#include <asm/mmu_context.h>
#include <asm/processor.h>

/*
 * Very stupidly, we used to get new pgd's and pmd's, init their contents
 * to point to the NULL versions of the next level page table, later on
 * completely re-init them the same way, then free them up.  This wasted
 * a lot of work and caused unnecessary memory traffic.  How broken...
 * We fix this by caching them.
 */
#define pgd_quicklist		(local_cpu_data->pgd_quick)
#define pmd_quicklist		(local_cpu_data->pmd_quick)
#define pte_quicklist		(local_cpu_data->pte_quick)
#define pgtable_cache_size	(local_cpu_data->pgtable_cache_sz)
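
/*
 * Illustrative sketch (not part of the original header): each quicklist is a
 * per-CPU LIFO free list threaded through the first word of every cached
 * page-table page.  Freeing pushes a page by storing the current list head
 * into its first word; allocating pops the head and follows that link.  The
 * hypothetical helpers below show the idea in isolation; `list` stands for
 * one of the quicklist heads above and `page` for a page-sized allocation.
 */
#if 0	/* example only, never compiled */
static inline void
quicklist_push (unsigned long **list, unsigned long *page)
{
	*page = (unsigned long) *list;	/* link the old head through word 0 */
	*list = page;			/* the freed page becomes the new head */
}

static inline unsigned long *
quicklist_pop (unsigned long **list)
{
	unsigned long *page = *list;

	if (page) {
		*list = (unsigned long *) *page;	/* unlink the head */
		*page = 0;	/* don't leak the link word into a live page table */
	}
	return page;		/* NULL when the list is empty */
}
#endif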

static inline pgd_t*
pgd_alloc_one_fast (struct mm_struct *mm)
{
	unsigned long *ret = pgd_quicklist;

	if (__builtin_expect(ret != NULL, 1)) {
		/* pop the head page off the pgd quicklist */
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_cache_size;
	}
	return (pgd_t *) ret;
}

static inline pgd_t*
pgd_alloc (struct mm_struct *mm)
{
	/* the VM system never calls pgd_alloc_one_fast(), so we do it here. */
	pgd_t *pgd = pgd_alloc_one_fast(mm);

	if (__builtin_expect(pgd == NULL, 0)) {
		pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
		if (__builtin_expect(pgd != NULL, 1))
			clear_page(pgd);
	}
	return pgd;
}

static inline void
pgd_free (pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	++pgtable_cache_size;
}

static inline void
pgd_populate (struct mm_struct *mm, pgd_t *pgd_entry, pmd_t *pmd)
{
	pgd_val(*pgd_entry) = __pa(pmd);
}

static inline pmd_t*
pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
{
	unsigned long *ret = (unsigned long *)pmd_quicklist;

	if (__builtin_expect(ret != NULL, 1)) {
		pmd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_cache_size;
	}
	return (pmd_t *) ret;
}

static inline pmd_t*
pmd_alloc_one (struct mm_struct *mm, unsigned long addr)
{
	pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

	if (__builtin_expect(pmd != NULL, 1))
		clear_page(pmd);
	return pmd;
}

static inline void
pmd_free (pmd_t *pmd)
{
	*(unsigned long *)pmd = (unsigned long) pmd_quicklist;
	pmd_quicklist = (unsigned long *) pmd;
	++pgtable_cache_size;
}

static inline void
pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}

static inline pte_t*
pte_alloc_one_fast (struct mm_struct *mm, unsigned long addr)
{
	unsigned long *ret = (unsigned long *)pte_quicklist;

	if (__builtin_expect(ret != NULL, 1)) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_cache_size;
	}
	return (pte_t *) ret;
}

static inline pte_t*
pte_alloc_one (struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);

	if (__builtin_expect(pte != NULL, 1))
		clear_page(pte);
	return pte;
}

static inline void
pte_free (pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	++pgtable_cache_size;
}

extern int do_check_pgt_cache (int, int);

/*
 * Now for some TLB flushing routines.  This is the kind of stuff that
 * can be very expensive, so try to avoid them whenever possible.
 */

/*
 * Flush everything (kernel mapping may also have changed due to
 * vmalloc/vfree).
 */
extern void local_flush_tlb_all (void);

#ifdef CONFIG_SMP
  extern void smp_flush_tlb_all (void);
  extern void smp_flush_tlb_mm (struct mm_struct *mm);
# define flush_tlb_all()	smp_flush_tlb_all()
#else
# define flush_tlb_all()	local_flush_tlb_all()
#endif

static inline void
local_flush_tlb_mm (struct mm_struct *mm)
{
	if (mm == current->active_mm)
		activate_context(mm);
}

/*
 * Flush a specified user mapping.  This is called, e.g., as a result of fork() and
 * exit().  fork() ends up here because the copy-on-write mechanism needs to write-protect
 * the PTEs of the parent task.
 */
static inline void
flush_tlb_mm (struct mm_struct *mm)
{
	if (!mm)
		return;

	mm->context = 0;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

#ifdef CONFIG_SMP
	smp_flush_tlb_mm(mm);
#else
	local_flush_tlb_mm(mm);
#endif
}

extern void flush_tlb_range (struct mm_struct *mm, unsigned long start, unsigned long end);

/*
 * Page-granular tlb flush.
 */
static inline void
flush_tlb_page (struct vm_area_struct *vma, unsigned long addr)
{
#ifdef CONFIG_SMP
	flush_tlb_range(vma->vm_mm, (addr & PAGE_MASK), (addr & PAGE_MASK) + PAGE_SIZE);
#else
	if (vma->vm_mm == current->active_mm)
		asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(PAGE_SHIFT << 2) : "memory");
	else
		vma->vm_mm->context = 0;
#endif
}

/*
 * Flush the TLB entries mapping the virtually mapped linear page
 * table corresponding to address range [START-END).
 */
static inline void
flush_tlb_pgtables (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	if (unlikely(end - start >= 1024*1024*1024*1024UL
		     || rgn_index(start) != rgn_index(end - 1)))
		/*
		 * This condition is very rare and normal applications shouldn't get
		 * here.  No attempt has been made to optimize for this case.
		 */
		flush_tlb_all();
	else
		flush_tlb_range(mm, ia64_thash(start), ia64_thash(end));
}
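
/*
 * Rough numbers to illustrate why this is worthwhile (illustrative only;
 * the figures assume the current 8KB page size and 8-byte PTEs): one page
 * of the virtually mapped linear page table holds 8192 / 8 = 1024 PTEs,
 * i.e. it maps 1024 * 8KB = 8MB of user address space.  Tearing down an
 * 8MB mapping therefore needs only a single page worth of the linear page
 * table flushed via ia64_thash(), provided start and end lie in the same
 * region, which is what the check above guards.
 */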

/*
 * Cache flushing routines.  This is the kind of stuff that can be very expensive, so try
 * to avoid them whenever possible.
 */

#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_range(mm, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr)		do { } while (0)
#define flush_page_to_ram(page)		do { } while (0)
#define flush_icache_page(vma,page)		do { } while (0)

#define flush_dcache_page(page)			\
do {						\
	clear_bit(PG_arch_1, &(page)->flags);	\
} while (0)

extern void flush_icache_range (unsigned long start, unsigned long end);

#define flush_icache_user_range(vma, page, user_addr, len)					\
do {												\
	unsigned long _addr = (unsigned long) page_address(page) + ((user_addr) & ~PAGE_MASK);	\
	flush_icache_range(_addr, _addr + (len));						\
} while (0)
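
/*
 * Note on the address arithmetic above (illustrative): PAGE_MASK clears the
 * offset bits, so (user_addr & ~PAGE_MASK) is just the offset of user_addr
 * within its page (0..PAGE_SIZE-1).  Adding that offset to page_address(page)
 * yields the kernel-mapped alias of the same bytes, which is the range that
 * flush_icache_range() actually operates on.
 */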

static inline void
clear_user_page (void *addr, unsigned long vaddr, struct page *page)
{
	clear_page(addr);
	flush_dcache_page(page);
}

static inline void
copy_user_page (void *to, void *from, unsigned long vaddr, struct page *page)
{
	copy_page(to, from);
	flush_dcache_page(page);
}

/*
 * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
 * information.  However, we use this function to take care of any (delayed) i-cache
 * flushing that may be necessary.
 */
static inline void
update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
{
	unsigned long addr;
	struct page *page;

	if (!pte_exec(pte))
		return;				/* not an executable page... */

	page = pte_page(pte);
	/* don't use VADDR: it may not be mapped on this CPU (or may have just been flushed): */
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + PAGE_SIZE);
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}
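
/*
 * Summary of the PG_arch_1 protocol used above (descriptive note, not from
 * the original file): flush_dcache_page() clears PG_arch_1 whenever the
 * kernel may have dirtied a page's data, and update_mmu_cache() flushes the
 * i-cache and sets the bit again the first time an executable translation
 * for that page is installed.  The bit therefore means "i-cache is coherent
 * with the d-cache for this page", and the expensive flush_icache_range()
 * call is deferred until a page is actually mapped for execution.
 */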

#endif /* _ASM_IA64_PGALLOC_H */