/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hpenner@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/config.h>
#include <asm/processor.h>
#include <linux/threads.h>
#include <linux/slab.h>

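/*
 * The quicklists are per-cpu caches (kept in the lowcore) of freed
 * page tables, linked through the first word of each free table.
 * pgtable_cache_size counts pages: pgd and pmd tables are order-1
 * allocations (two pages), pte tables a single page, hence the
 * +/- 2 and +/- 1 accounting in the functions below.
 */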
#define pgd_quicklist (S390_lowcore.cpu_data.pgd_quick)
#define pmd_quicklist (S390_lowcore.cpu_data.pmd_quick)
#define pte_quicklist (S390_lowcore.cpu_data.pte_quick)
#define pgtable_cache_size (S390_lowcore.cpu_data.pgtable_cache_sz)

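/*
 * diag10 is implemented in arch code; diagnose 0x10 tells the
 * hypervisor that the page at addr is free and may be reclaimed.
 */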
extern void diag10(unsigned long addr);

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

/*
 * page directory allocation/free routines.
 */
extern __inline__ pgd_t *get_pgd_slow (void)
{
        pgd_t *ret;
        int i;

        ret = (pgd_t *) __get_free_pages(GFP_KERNEL, 1);
        if (ret != NULL)
                for (i = 0; i < PTRS_PER_PGD; i++)
                        pgd_clear(ret + i);
        return ret;
}

extern __inline__ pgd_t *get_pgd_fast (void)
{
        unsigned long *ret = pgd_quicklist;

        if (ret != NULL) {
                pgd_quicklist = (unsigned long *)(*ret);
                /* restore the entry clobbered by the freelist link */
                ret[0] = ret[1];
                pgtable_cache_size -= 2;
        }
        return (pgd_t *) ret;
}

extern __inline__ pgd_t *pgd_alloc (struct mm_struct *mm)
{
        pgd_t *pgd;

        pgd = get_pgd_fast();
        if (!pgd)
                pgd = get_pgd_slow();
        return pgd;
}

extern __inline__ void free_pgd_fast (pgd_t *pgd)
{
        *(unsigned long *) pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size += 2;
}

extern __inline__ void free_pgd_slow (pgd_t *pgd)
{
        free_pages((unsigned long) pgd, 1);
}

#define pgd_free(pgd) free_pgd_fast(pgd)

extern pmd_t *pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd);

/*
 * page middle directory allocation/free routines.
 */
extern inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pmd_t *pmd;
        int i;

        pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, 1);
        if (pmd != NULL)
                for (i=0; i < PTRS_PER_PMD; i++)
                        pmd_clear(pmd+i);
        return pmd;
}

extern __inline__ pmd_t *
pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long *ret = (unsigned long *) pmd_quicklist;

        if (ret != NULL) {
                pmd_quicklist = (unsigned long *)(*ret);
                /* restore the entry clobbered by the freelist link */
                ret[0] = ret[1];
                pgtable_cache_size -= 2;
        }
        return (pmd_t *) ret;
}

extern void pmd_free_order2(pmd_t *);
extern __inline__ void pmd_free_fast (pmd_t *pmd)
{
        if (test_bit(PG_arch_1, &virt_to_page(pmd)->flags) == 0) {
                *(unsigned long *) pmd = (unsigned long) pmd_quicklist;
                pmd_quicklist = (unsigned long *) pmd;
                pgtable_cache_size += 2;
        } else
                /* PG_arch_1 marks an order-2 table; free it in arch code */
                pmd_free_order2(pmd);
}

extern __inline__ void pmd_free_slow (pmd_t *pmd)
{
        free_pages((unsigned long) pmd, 1);
}

#define pmd_free(pmd) pmd_free_fast(pmd)

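/*
 * A pmd_t holds two segment-table entries: the first maps the lower
 * half of the pte page, the second the upper half starting at
 * pte+256.
 */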
extern inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
        pmd_val(*pmd) = _PMD_ENTRY | __pa(pte);
        pmd_val1(*pmd) = _PMD_ENTRY | __pa(pte+256);
}

/*
 * page table entry allocation/free routines.
 */
extern inline pte_t * pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pte_t *pte;
        int i;

        pte = (pte_t *) __get_free_page(GFP_KERNEL);
        if (pte != NULL)
                for (i=0; i < PTRS_PER_PTE; i++)
                        pte_clear(pte+i);
        return pte;
}

extern __inline__ pte_t* pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long *ret = (unsigned long *) pte_quicklist;

        if (ret != NULL) {
                pte_quicklist = (unsigned long *)(*ret);
                /* restore the entry clobbered by the freelist link */
                ret[0] = ret[1];
                pgtable_cache_size--;
        }
        return (pte_t *) ret;
}

extern __inline__ void pte_free_fast (pte_t *pte)
{
        *(unsigned long *) pte = (unsigned long) pte_quicklist;
        pte_quicklist = (unsigned long *) pte;
        pgtable_cache_size++;
}

extern __inline__ void pte_free_slow (pte_t *pte)
{
        free_page((unsigned long) pte);
}

#define pte_free(pte) pte_free_fast(pte)

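/*
 * do_check_pgt_cache (defined in arch code) trims the quicklists:
 * when pgtable_cache_size exceeds the high watermark, cached tables
 * are freed until the low watermark is reached.
 */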
extern int do_check_pgt_cache (int, int);

/*
 * This establishes kernel virtual mappings (e.g., as a result of a
 * vmalloc call). Since s390-esame uses a separate kernel page table,
 * there is nothing to do here... :)
 */
#define set_pgdir(vmaddr, entry) do { } while(0)

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes TLBs
 *    called only from vmalloc/vfree
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */

/*
 * S/390 has three ways of flushing TLBs
 * 'ptlb' does a flush of the local processor
 * 'csp' flushes the TLBs on all PUs of a SMP
 * 'ipte' invalidates a pte in a page table and flushes that out of
 * the TLBs of all PUs of a SMP
 */

#define local_flush_tlb() \
        do { __asm__ __volatile__("ptlb": : :"memory"); } while (0)

#ifndef CONFIG_SMP

/*
 * We always need to flush, since s390 does not flush tlb
 * on each context switch
 */
static inline void flush_tlb(void)
{
        local_flush_tlb();
}
static inline void flush_tlb_all(void)
{
        local_flush_tlb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        local_flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        local_flush_tlb();
}
static inline void flush_tlb_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
        local_flush_tlb();
}

#else

#include <asm/smp.h>

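/*
 * csp (compare and swap and purge) with the low-order bit of the
 * second-operand address set purges the TLBs of all CPUs when the
 * compare succeeds. The compare value, the swap value and the word
 * at the operand address are all zero here, so the compare always
 * succeeds and only the purge has an effect.
 */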
static inline void global_flush_tlb(void)
{
        long dummy = 0;

        __asm__ __volatile__ (
                "    la   4,3(%0)\n"    /* round address of dummy up ... */
                "    nill 4,0xfffc\n"   /* ... to the next word boundary */
                "    la   4,1(4)\n"     /* set the TLB purge bit */
                "    slr  2,2\n"        /* compare value: 0 */
                "    slr  3,3\n"        /* swap value: 0 */
                "    csp  2,4"          /* compare and swap and purge */
                : : "a" (&dummy) : "cc", "2", "3", "4" );
}

/*
 * We only have to do a global flush of the tlb if the process has
 * run on a pu other than the current one since the last flush.
 * If the mm has several users (mm_users > 1) we always do a global
 * flush, since the process can run on more than one processor at
 * the same time.
 */
static inline void __flush_tlb_mm(struct mm_struct * mm)
{
        if (mm->cpu_vm_mask != (1UL << smp_processor_id())) {
                /* mm was active on more than one cpu. */
                if (mm == current->active_mm &&
                    atomic_read(&mm->mm_users) == 1)
                        /* this cpu is the only one using the mm. */
                        mm->cpu_vm_mask = 1UL << smp_processor_id();
                global_flush_tlb();
        } else
                local_flush_tlb();
}

static inline void flush_tlb(void)
{
        __flush_tlb_mm(current->mm);
}
static inline void flush_tlb_all(void)
{
        global_flush_tlb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        __flush_tlb_mm(mm);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        __flush_tlb_mm(vma->vm_mm);
}
static inline void flush_tlb_range(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
        __flush_tlb_mm(mm);
}

#endif /* CONFIG_SMP */

extern inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
        /* S/390 does not keep any page table caches in TLB */
}

static inline int ptep_test_and_clear_and_flush_young(struct vm_area_struct *vma,
                                                      unsigned long address, pte_t *ptep)
{
        /* No need to flush TLB; bits are in storage key */
        return ptep_test_and_clear_young(ptep);
}

static inline int ptep_test_and_clear_and_flush_dirty(struct vm_area_struct *vma,
                                                      unsigned long address, pte_t *ptep)
{
        /* No need to flush TLB; bits are in storage key */
        return ptep_test_and_clear_dirty(ptep);
}

static inline pte_t ptep_invalidate(struct vm_area_struct *vma,
                                    unsigned long address, pte_t *ptep)
{
        pte_t pte = *ptep;
        if (!(pte_val(pte) & _PAGE_INVALID))
                __asm__ __volatile__ ("ipte %0,%1" : : "a" (ptep), "a" (address));
        pte_clear(ptep);
        return pte;
}

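/*
 * To replace an active pte the old entry is invalidated first, so
 * no cpu can still hold a stale TLB entry for the page when the
 * new value is set.
 */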
static inline void ptep_establish(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep, pte_t entry)
{
        ptep_invalidate(vma, address, ptep);
        set_pte(ptep, entry);
}

#endif /* _S390_PGALLOC_H */