2 * BK Id: SCCS/s.iSeries_mmu.c 1.5 11/06/01 16:22:12 trini
5 * Procedures for MMU handling on iSeries systems, where we
6 * have to call the hypervisor to change things in the hash table.
9 * Copyright (C) 2001 IBM Corp.
10 * updated by Dave Boutcher (boutcher@us.ibm.com)
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
17 #include <linux/config.h>
18 #include <linux/signal.h>
19 #include <linux/sched.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/string.h>
23 #include <linux/types.h>
24 #include <linux/ptrace.h>
25 #include <linux/mman.h>
27 #include <linux/swap.h>
28 #include <linux/stddef.h>
29 #include <linux/vmalloc.h>
30 #include <linux/init.h>
31 #include <linux/delay.h>
32 #include <linux/bootmem.h>
33 #include <linux/highmem.h>
35 #include <asm/pgalloc.h>
38 #include <asm/mmu_context.h>
39 #include <asm/pgtable.h>
41 #include <asm/bootx.h>
42 #include <asm/machdep.h>
43 #include <asm/setup.h>
44 #include <asm/iSeries/LparData.h>
45 #include <asm/iSeries/HvCallHpt.h>
46 #include <linux/pci.h>
47 #include <asm/iSeries/iSeries_dma.h>
/* Highest primary-group slot index (0..7) used so far for a bolted kernel
 * HPT entry; raised as MMU_init_hw() below bolts pages into the table. */
50 int iSeries_max_kernel_hpt_slot = 0;
/* Defined elsewhere in the iSeries platform code. */
51 extern unsigned maxPacas;
/* Set to 1 by MMU_init_hw() once the hash page table has been populated. */
52 extern int iSeries_hpt_loaded;
/* Presumably returns nonzero when 'address' falls inside the iSeries
 * memory-mapped I/O region -- TODO confirm against its definition. */
56 extern int iSeries_Is_IoMmAddress(unsigned long address);
58 /*********************************************************************/
59 /* iSeries maps I/O space to device, just leave the address where is.*/
60 /*********************************************************************/
/* NOTE(review): the function body is not visible in this chunk; per the
 * comment above it presumably returns 'addr' unchanged ('size' unused)
 * -- confirm against the full file. */
61 void* ioremap(unsigned long addr, unsigned long size)
/* Flags-taking variant of ioremap(). NOTE(review): body not visible in
 * this chunk; on iSeries the hypervisor already maps I/O space, so
 * 'flags' is presumably ignored and 'addr' returned as-is -- confirm. */
66 void* __ioremap(unsigned long addr, unsigned long size, unsigned long flags)
71 /********************************************************************/
72 /* iSeries does not remap the space, so there is nothing to undo.   */
73 /********************************************************************/
/* NOTE(review): body not visible in this chunk; since ioremap() above
 * is documented as a no-op mapping, this is presumably empty -- confirm. */
74 void iounmap(void *addr)
78 #endif /* CONFIG_PCI */
81 * Map as much of memory as will fit into the first entry of each
82 * PPC HPTE Group. (These are the "bolted" entries which will
83 * never be cast out). The iSeries Hypervisor has already mapped
84 * the first 32 MB (specified in LparMap.h). Here we map as
85 * much more as we can.
/*
 * Bolt as much of kernel memory as possible into the hardware hash page
 * table via the iSeries hypervisor (HvCallHpt_*). For each kernel page,
 * if no HPTE exists yet and a slot is free in the first three slots of
 * the primary PTEG, add a "bolted" entry that will never be cast out.
 *
 * NOTE(review): this chunk is missing several lines of the original
 * function (the opening brace, declarations of 'hpte', 'rtnIndex',
 * 'hpteIndex' and 'group', the computation of 'rpn' and 'numAdded',
 * the loop-closing braces, and the remaining hpte field setup) --
 * comments below describe only what is visible.
 */
88 void __init MMU_init_hw(void)
91 u64 *hpte0Ptr, *hpte1Ptr;
92 u32 HptSizeGroups, msPages, rpn, vsid, ea;
96 unsigned long numAdded;
/* Report progress to the platform callback, if one is registered. */
98 if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
/* View the (elsewhere-declared) hpte as two raw 64-bit words so both
 * halves can be zeroed in one statement below. */
100 hpte0Ptr = (u64 *)&hpte;
101 hpte1Ptr = hpte0Ptr + 1;
103 /* Get the number of Hpt groups */
/* Ask the hypervisor for the HPT size in pages; 32 PTE groups per page. */
104 HptSizeGroups = (u32)HvCallHpt_getHptPages() * 32;
/* Group count is a power of two, so size-1 works as a hash mask. */
105 Hash_mask = HptSizeGroups - 1;
107 /* Number of pages in memory */
/* Each LP chunk presumably covers 64 pages (hence << 6) -- confirm. */
108 msPages = totalLpChunks << 6;
110 /* For each virtual page in kernel space, add a hpte if there
111 isn't one already in slot 0 of the primary pteg. */
115 for ( ea = (u32)KERNELBASE; ea < (u32)high_memory; ea+= PAGE_SIZE) {
/* VSID is derived from the ESID (top 4 bits of the EA) by the
 * kernel's fixed 0x111 multiplier scheme. */
118 vsid = ((ea >> 28) * 0x111);
/* Ask the hypervisor whether a valid HPTE already exists for this
 * VPN; on miss, rtnIndex encodes the first free slot (high bit set
 * means the free slot is in the secondary group). NOTE(review):
 * 'rpn' is assigned on a line missing from this chunk. */
120 rtnIndex = HvCallHpt_findValid( &hpte,
121 (rpn & 0xffff) | (vsid << 16));
122 hpteIndex = (u32)rtnIndex;
123 if ( hpte.v ) /* If valid entry found */
124 continue; /* Already mapped, nothing to do */
125 if ( rtnIndex == ~0 ) /* If no free entry found */
126 BUG(); /* Can't map this page bolted */
127 if ( rtnIndex >> 63 ) /* If first free slot is secondary */
128 BUG(); /* Can't map this page bolted */
/* Only the first three primary slots are treated as bolted. */
129 if ( (hpteIndex & 7) > 2) /* Not in first 3 slots */
132 * If returned index is the first in the primary group
133 * then build an hpt entry for this page.
/* Clear both 64-bit halves of the HPTE before filling it in. */
135 *hpte0Ptr = *hpte1Ptr = 0;
/* Abbreviated page index: bits of the VPN checked by hardware. */
137 hpte.api = (rpn >> 11) & 0x1f;
/* Translate the partition-relative RPN to an absolute RPN. */
140 hpte.rpn = physRpn_to_absRpn( rpn );
/* Install the new entry at hpteIndex via the hypervisor. */
148 HvCallHpt_addValidate( hpteIndex, 0, &hpte );
/* Track the highest primary slot used, so the fault path knows
 * which slots must never be evicted. */
150 group = rtnIndex & 0x07;
151 if (group > iSeries_max_kernel_hpt_slot)
152 iSeries_max_kernel_hpt_slot = group;
155 printk( "iSeries_hashinit: added %ld hptes to existing mapping. Max group %x\n",
156 numAdded, iSeries_max_kernel_hpt_slot );
158 if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
/* Flag the table as populated and give Hash a non-NULL sentinel value
 * (the real HPT is owned by the hypervisor, not addressable here). */
160 iSeries_hpt_loaded = 1;
161 Hash = (void *)0xFFFFFFFF;
165 * This is called at the end of handling a user page fault, when the
166 * fault has been handled by updating a PTE in the linux page tables.
167 * We use it to preload an HPTE into the hash table corresponding to
168 * the updated linux PTE.
/*
 * Preload an HPTE for a freshly-updated Linux PTE so the next access
 * does not fault again (see the comment block above).
 *
 * NOTE(review): the function is truncated in this chunk -- the trailing
 * parameter (presumably the pte), the declarations of 'pmd'/'ptep', the
 * use of 'nopreload', and the closing braces are not visible; comments
 * describe only the lines shown.
 */
170 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
173 struct mm_struct *mm;
176 static int nopreload;
/* User addresses belong to the vma's mm; kernel addresses to init_mm. */
180 mm = (address < TASK_SIZE)? vma->vm_mm: &init_mm;
/* Walk the Linux page tables down to the pmd for this address. */
181 pmd = pmd_offset(pgd_offset(mm, address), address);
182 if (!pmd_none(*pmd)) {
183 ptep = pte_offset(pmd, address);
/* Insert the corresponding entry into the hash page table. */
184 add_hash_page(mm->context, address, ptep);