/*
 *  include/asm-s390/pgalloc.h
 *
 *  Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Hartmut Penner (hpenner@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */
#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/config.h>
#include <asm/processor.h>
#include <linux/threads.h>
#include <linux/slab.h>
#define pgd_quicklist (S390_lowcore.cpu_data.pgd_quick)
#define pmd_quicklist (S390_lowcore.cpu_data.pmd_quick)
#define pte_quicklist (S390_lowcore.cpu_data.pte_quick)
#define pgtable_cache_size (S390_lowcore.cpu_data.pgtable_cache_sz)
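/*
 * The quicklists are per-cpu caches of recently freed page tables.
 * A free table is threaded onto its list through its first word:
 * freeing stores the old list head into the table and makes the
 * table the new head; allocating pops the head and reads the next
 * pointer from it. pgtable_cache_size counts pages: pgd and pmd
 * tables are order-1 allocations (two pages, hence +/- 2), pte
 * tables a single page.
 */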
/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

/*
 * page directory allocation/free routines.
 */
extern __inline__ pgd_t *get_pgd_slow (void)
{
        pgd_t *ret;
        int i;

        ret = (pgd_t *) __get_free_pages(GFP_KERNEL, 1);
        if (ret != NULL)
                for (i = 0; i < PTRS_PER_PGD; i++)
                        pgd_clear(ret + i);
        return ret;
}
extern __inline__ pgd_t *get_pgd_fast (void)
{
        unsigned long *ret = pgd_quicklist;

        if (ret != NULL) {
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = ret[1];        /* restore entry clobbered by the list link */
                pgtable_cache_size -= 2;
        }
        return (pgd_t *) ret;
}
extern __inline__ pgd_t *pgd_alloc (struct mm_struct *mm)
{
        pgd_t *pgd = get_pgd_fast();

        if (!pgd)
                pgd = get_pgd_slow();
        return pgd;
}
extern __inline__ void free_pgd_fast (pgd_t *pgd)
{
        *(unsigned long *) pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size += 2;
}
extern __inline__ void free_pgd_slow (pgd_t *pgd)
{
        free_pages((unsigned long) pgd, 1);
}

#define pgd_free(pgd)           free_pgd_fast(pgd)

extern pmd_t *pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd);
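/*
 * pgd_populate() is out of line in the arch mm code. It links the
 * pmd into the given pgd slot; the pmd_t * return value presumably
 * hands back the pmd that actually ended up in the slot.
 */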
/*
 * page middle directory allocation/free routines.
 */
extern inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pmd_t *pmd;
        int i;

        pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, 1);
        if (pmd != NULL) {
                for (i=0; i < PTRS_PER_PMD; i++)
                        pmd_clear(pmd+i);
        }
        return pmd;
}
extern __inline__ pmd_t *
pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long *ret = (unsigned long *) pmd_quicklist;

        if (ret != NULL) {
                pmd_quicklist = (unsigned long *)(*ret);
                ret[0] = ret[1];        /* restore entry clobbered by the list link */
                pgtable_cache_size -= 2;
        }
        return (pmd_t *) ret;
}
extern void pmd_free_order2(pmd_t *);

extern __inline__ void pmd_free_fast (pmd_t *pmd)
{
        if (test_bit(PG_arch_1, &virt_to_page(pmd)->flags) == 0) {
                *(unsigned long *) pmd = (unsigned long) pmd_quicklist;
                pmd_quicklist = (unsigned long *) pmd;
                pgtable_cache_size += 2;
        } else
                pmd_free_order2(pmd);
}
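/*
 * A pmd page with PG_arch_1 set in its struct page did not come
 * from the normal order-1 allocation; it has to go back through
 * pmd_free_order2() instead of onto the quicklist.
 */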
extern __inline__ void pmd_free_slow (pmd_t *pmd)
{
        free_pages((unsigned long) pmd, 1);
}

#define pmd_free(pmd)           pmd_free_fast(pmd)
extern inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
        pmd_val(*pmd) = _PMD_ENTRY | __pa(pte);
        pmd_val1(*pmd) = _PMD_ENTRY | __pa(pte+256);
}
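/*
 * A software pmd entry is two hardware segment table entries
 * (pmd_val and pmd_val1). Each hardware page table holds 256 ptes,
 * so pte and pte+256 are the two halves of the pte page allocated
 * by pte_alloc_one() below.
 */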
/*
 * page table entry allocation/free routines.
 */
extern inline pte_t * pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pte_t *pte;
        int i;

        pte = (pte_t *) __get_free_page(GFP_KERNEL);
        if (pte != NULL) {
                for (i=0; i < PTRS_PER_PTE; i++)
                        pte_clear(pte+i);
        }
        return pte;
}
extern __inline__ pte_t* pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long *ret = (unsigned long *) pte_quicklist;

        if (ret != NULL) {
                pte_quicklist = (unsigned long *)(*ret);
                ret[0] = ret[1];        /* restore pte clobbered by the list link */
                pgtable_cache_size--;
        }
        return (pte_t *) ret;
}
extern __inline__ void pte_free_fast (pte_t *pte)
{
        *(unsigned long *) pte = (unsigned long) pte_quicklist;
        pte_quicklist = (unsigned long *) pte;
        pgtable_cache_size++;
}
extern __inline__ void pte_free_slow (pte_t *pte)
{
        free_page((unsigned long) pte);
}

#define pte_free(pte)           pte_free_fast(pte)

extern int do_check_pgt_cache (int, int);
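/*
 * do_check_pgt_cache(low, high) trims the quicklists: once
 * pgtable_cache_size exceeds the high water mark, cached tables are
 * handed back to the page allocator until the low mark is reached.
 */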
/*
 * This establishes kernel virtual mappings (e.g., as a result of a
 * vmalloc call). Since s390-esame uses a separate kernel page table,
 * there is nothing to do here... :)
 */
#define set_pgdir(vmaddr, entry)        do { } while(0)
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *    called only from vmalloc/vfree
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
/*
 * S/390 has three ways of flushing TLBs:
 * 'ptlb' does a flush of the local processor
 * 'csp' flushes the TLBs on all PUs of an SMP system
 * 'ipte' invalidates a pte in a page table and flushes that entry
 * out of the TLBs of all PUs of an SMP system
 */
#define local_flush_tlb() \
do {  __asm__ __volatile__("ptlb": : :"memory"); } while (0)
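/*
 * On UP only the local TLB ever needs flushing, so every primitive
 * below maps to 'ptlb'. On SMP the flush may have to be broadcast
 * to all processors with 'csp' (see global_flush_tlb below).
 */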
#ifndef CONFIG_SMP

/*
 * We always need to flush, since s390 does not flush the TLB
 * on each context switch.
 */

static inline void flush_tlb(void)
{
        local_flush_tlb();
}
static inline void flush_tlb_all(void)
{
        local_flush_tlb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        local_flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        local_flush_tlb();
}
static inline void flush_tlb_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
        local_flush_tlb();
}

#else

#include <asm/smp.h>
static inline void global_flush_tlb(void)
{
        long dummy = 0;

        __asm__ __volatile__ (
                "    la   4,3(%0)\n"    /* r4 = address of dummy + 3 */
                "    nill 4,0xfffc\n"   /* round down to word boundary */
                "    slr  2,2\n"        /* r2 = 0: compare value */
                "    slr  3,3\n"        /* r3 = 0: swap value */
                "    csp  2,4"          /* compare-and-swap-and-purge: flush all TLBs */
                : : "a" (&dummy) : "cc", "2", "3", "4" );
}
/*
 * We only have to do a global flush of the tlb if the process has
 * run on another pu since the last flush.
 * If the mm is used by several threads (mm_count > 1) we always do
 * a global flush, since the process may run on more than one
 * processor at the same time.
 */
static inline void __flush_tlb_mm(struct mm_struct * mm)
{
        if ((smp_num_cpus > 1) &&
            ((atomic_read(&mm->mm_count) != 1) ||
             (mm->cpu_vm_mask != (1UL << smp_processor_id())))) {
                mm->cpu_vm_mask = (1UL << smp_processor_id());
                global_flush_tlb();
        } else
                local_flush_tlb();
}
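/*
 * After a global flush cpu_vm_mask is reset to the current cpu
 * only, so the next flush of a single-threaded mm from the same
 * cpu can again use the cheaper local 'ptlb'.
 */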
static inline void flush_tlb(void)
{
        __flush_tlb_mm(current->mm);
}
static inline void flush_tlb_all(void)
{
        global_flush_tlb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        __flush_tlb_mm(mm);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        __flush_tlb_mm(vma->vm_mm);
}
static inline void flush_tlb_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
        __flush_tlb_mm(mm);
}

#endif /* CONFIG_SMP */
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
        /* S/390 does not keep any page table caches in TLB */
}
static inline int ptep_test_and_clear_and_flush_young(struct vm_area_struct *vma,
                                                      unsigned long address, pte_t *ptep)
{
        /* No need to flush TLB; bits are in storage key */
        return ptep_test_and_clear_young(ptep);
}
static inline int ptep_test_and_clear_and_flush_dirty(struct vm_area_struct *vma,
                                                      unsigned long address, pte_t *ptep)
{
        /* No need to flush TLB; bits are in storage key */
        return ptep_test_and_clear_dirty(ptep);
}
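/*
 * On s390 the referenced and changed bits live in the per-page
 * storage key, not in the pte, so clearing them never requires a
 * TLB flush.
 */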
static inline pte_t ptep_invalidate(struct vm_area_struct *vma,
                                    unsigned long address, pte_t *ptep)
{
        pte_t pte = *ptep;

        if (!(pte_val(pte) & _PAGE_INVALID))
                __asm__ __volatile__ ("ipte %0,%1" : : "a" (ptep), "a" (address));
        pte_clear(ptep);
        return pte;
}
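/*
 * 'ipte' marks the pte invalid and at the same time purges it from
 * the TLBs of all PUs, so no separate flush is needed; pte_clear()
 * then rewrites the entry as an empty, invalid pte.
 */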
static inline void ptep_establish(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep, pte_t entry)
{
        ptep_invalidate(vma, address, ptep);
        set_pte(ptep, entry);
}
#endif /* _S390_PGALLOC_H */