/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

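/*
 * Current huge page shift; defaults to HPAGE_SHIFT_DEFAULT and may be
 * overridden at boot time via the "hugepagesz=" parameter (see
 * hugetlb_setup_sz() below).
 */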
unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;

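/*
 * Return the PTE that will map the huge page at 'addr', allocating the
 * intermediate pmd/pte levels as needed.  htlbpage_to_page() first scales
 * the huge page region address down so that the generic pgd/pmd/pte
 * walkers can be used, with one PTE per huge page.
 */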
static pte_t *
huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
{
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, taddr);
        pmd = pmd_alloc(mm, pgd, taddr);
        if (pmd)
                pte = pte_alloc_map(mm, pmd, taddr);
        return pte;
}

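/*
 * Like huge_pte_alloc(), but never allocates: return the PTE mapping the
 * huge page at 'addr', or NULL if the pgd or pmd level has not been set
 * up yet.
 */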
static pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, taddr);
        if (pgd_present(*pgd)) {
                pmd = pmd_offset(pgd, taddr);
                if (pmd_present(*pmd))
                        pte = pte_offset_map(pmd, taddr);
        }

        return pte;
}

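/* Finishing a huge PTE only requires forcing the present bit on. */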
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

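/*
 * Install a huge page mapping: build the PTE from the page and the VMA's
 * protection, make it writable and dirty only for write mappings, and
 * account the huge page as HPAGE_SIZE / PAGE_SIZE normal pages in mm->rss.
 */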
static void
set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
              struct page *page, pte_t * page_table, int write_access)
{
        pte_t entry;

        mm->rss += (HPAGE_SIZE / PAGE_SIZE);
        if (write_access) {
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);
        set_pte(page_table, entry);
        return;
}

/*
 * Check that both addr and len are suitably huge-page aligned and that
 * addr falls within the huge page region.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        if (REGION_NUMBER(addr) != REGION_HPAGE)
                return -EINVAL;

        return 0;
}

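/*
 * Duplicate the huge page mappings of src into dst (as done at fork time):
 * for each HPAGE_SIZE step, allocate the destination PTE, copy the source
 * PTE and take an extra reference on the shared page.
 */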
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;

        while (addr < end) {
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                src_pte = huge_pte_offset(src, addr);
                entry = *src_pte;
                ptepage = pte_page(entry);
                get_page(ptepage);
                set_pte(dst_pte, entry);
                dst->rss += (HPAGE_SIZE / PAGE_SIZE);
                addr += HPAGE_SIZE;
        }
        return 0;
nomem:
        return -ENOMEM;
}

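/*
 * Huge page leg of get_user_pages(): walk the range starting at *st,
 * filling pages[] and vmas[] one PAGE_SIZE subpage at a time beginning at
 * index i.  *st and *length are advanced to record the progress made and
 * the new index is returned.
 */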
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                    struct page **pages, struct vm_area_struct **vmas,
                    unsigned long *st, int *length, int i)
{
        pte_t *ptep, pte;
        unsigned long start = *st;
        unsigned long pstart;
        int len = *length;
        struct page *page;

        do {
                pstart = start & HPAGE_MASK;
                ptep = huge_pte_offset(mm, start);
                pte = *ptep;

back1:
                page = pte_page(pte);
                if (pages) {
                        page += ((start & ~HPAGE_MASK) >> PAGE_SHIFT);
                        get_page(page);
                        pages[i] = page;
                }
                if (vmas)
                        vmas[i] = vma;
                i++;
                len--;
                start += PAGE_SIZE;
                if (((start & HPAGE_MASK) == pstart) && len &&
                                (start < vma->vm_end))
                        goto back1;
        } while (len && start < vma->vm_end);
        *length = len;
        *st = start;
        return i;
}

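/*
 * Translate a user virtual address in the huge page region to its struct
 * page, offset to the PAGE_SIZE subpage that contains addr.
 */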
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
        struct page *page;
        pte_t *ptep;

        if (REGION_NUMBER(addr) != REGION_HPAGE)
                return ERR_PTR(-EINVAL);

        ptep = huge_pte_offset(mm, addr);
        if (!ptep || pte_none(*ptep))
                return NULL;
        page = pte_page(*ptep);
        page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}
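
/*
 * On ia64 huge pages live in their own virtual region rather than being
 * flagged at the pmd level, so the pmd-based helpers are stubs here.
 */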
int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
        return NULL;
}

/*
 * Same as generic free_pgtables(), except constant PGDIR_* and pgd_offset
 * are hugetlb region specific.
 */
void hugetlb_free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *prev,
        unsigned long start, unsigned long end)
{
        unsigned long first = start & HUGETLB_PGDIR_MASK;
        unsigned long last = end + HUGETLB_PGDIR_SIZE - 1;
        unsigned long start_index, end_index;
        struct mm_struct *mm = tlb->mm;

        if (!prev) {
                prev = mm->mmap;
                if (!prev)
                        goto no_mmaps;
                if (prev->vm_end > start) {
                        if (last > prev->vm_start)
                                last = prev->vm_start;
                        goto no_mmaps;
                }
        }
        for (;;) {
                struct vm_area_struct *next = prev->vm_next;

                if (next) {
                        if (next->vm_start < start) {
                                prev = next;
                                continue;
                        }
                        if (last > next->vm_start)
                                last = next->vm_start;
                }
                if (prev->vm_end > first)
                        first = prev->vm_end + HUGETLB_PGDIR_SIZE - 1;
                break;
        }
no_mmaps:
        if (last < first)       /* for arches with discontiguous pgd indices */
                return;
        /*
         * If the PGD bits are not consecutive in the virtual address, simply
         * shifting the VA right by PGDIR_SHIFT does not yield a usable index,
         * so translate through htlbpage_to_page() first.
         */

        start_index = pgd_index(htlbpage_to_page(first));
        end_index = pgd_index(htlbpage_to_page(last));

        if (end_index > start_index) {
                clear_page_tables(tlb, start_index, end_index - start_index);
        }
}

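/*
 * Tear down all huge page mappings in [start, end): drop the page reference
 * taken when each PTE was installed, clear the PTE, adjust mm->rss and
 * flush the TLB for the range.
 */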
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        struct page *page;

        BUG_ON(start & (HPAGE_SIZE - 1));
        BUG_ON(end & (HPAGE_SIZE - 1));

        for (address = start; address < end; address += HPAGE_SIZE) {
                pte = huge_pte_offset(mm, address);
                if (pte_none(*pte))
                        continue;
                page = pte_page(*pte);
                put_page(page);
                pte_clear(pte);
        }
        mm->rss -= (end - start) >> PAGE_SHIFT;
        flush_tlb_range(vma, start, end);
}

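/*
 * Pre-populate a hugetlbfs mapping: for every huge page in the VMA, look up
 * the backing page in the mapping's page cache, allocating a new huge page
 * (and charging the filesystem quota) if none exists yet, then install its
 * PTE with set_huge_pte().  Already-populated PTEs are skipped; the whole
 * walk runs under mm->page_table_lock.
 */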
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                pte_t *pte = huge_pte_alloc(mm, addr);
                struct page *page;

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }
                if (!pte_none(*pte))
                        continue;

                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_huge_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
                        if (!ret) {
                                unlock_page(page);
                        } else {
                                hugetlb_put_quota(mapping);
                                page_cache_release(page);
                                goto out;
                        }
                }
                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}

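/*
 * Find a free, HPAGE_SIZE-aligned range of 'len' bytes for a huge page
 * mapping.  The search is confined to the dedicated huge page region; a
 * hint outside that region (or one that is not huge-page aligned) is
 * ignored and the search restarts at HPAGE_REGION_BASE.
 */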
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;

        if (len > RGN_MAP_LIMIT)
                return -ENOMEM;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        /* This code assumes that REGION_HPAGE != 0. */
        if ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1)))
                addr = HPAGE_REGION_BASE;
        else
                addr = ALIGN(addr, HPAGE_SIZE);
        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point:  (!vmm || addr < vmm->vm_end). */
                if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
                        return -ENOMEM;
                if (!vmm || (addr + len) <= vmm->vm_start)
                        return addr;
                addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
        }
}

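/*
 * Parse the "hugepagesz=" boot parameter (for example "hugepagesz=256M",
 * assuming PAL reports 256MB as an insertable TLB page size).  The requested
 * size must be a power of two, larger than PAGE_SIZE, smaller than the
 * MAX_ORDER allocation limit and supported by the TLB; otherwise the default
 * huge page size is kept.
 */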
static int __init hugetlb_setup_sz(char *str)
{
        u64 tr_pages;
        unsigned long long size;

        if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
                /*
                 * shouldn't happen, but just in case.
                 */
                tr_pages = 0x15557000UL;

        size = memparse(str, &str);
        if (*str || (size & (size-1)) || !(tr_pages & size) ||
                size <= PAGE_SIZE ||
                size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
                printk(KERN_WARNING "Invalid huge page size specified\n");
                return 1;
        }

        hpage_shift = __ffs(size);
        /*
         * The boot CPU has already run ia64_mmu_init() using
         * HPAGE_SHIFT_DEFAULT, so override the huge page region
         * register here with the new page shift.
         */
        ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
        return 1;
}
__setup("hugepagesz=", hugetlb_setup_sz);