/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/pgalloc.h>
#define TASK_HPAGE_BASE (REGION_HPAGE << REGION_SHIFT)

static long htlbpagemem;
int htlbpage_max;
static long htlbzone_pages;

struct vm_operations_struct hugetlb_vm_ops;
static LIST_HEAD(htlbpage_freelist);
static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
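
/*
 * Grab a pre-reserved huge page off the global free list and zero all of
 * its constituent base pages.  Returns NULL when the pool is exhausted.
 */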
static struct page *alloc_hugetlb_page(void)
{
        int i;
        struct page *page;

        spin_lock(&htlbpage_lock);
        if (list_empty(&htlbpage_freelist)) {
                spin_unlock(&htlbpage_lock);
                return NULL;
        }
        page = list_entry(htlbpage_freelist.next, struct page, list);
        list_del(&page->list);
        htlbpagemem--;
        spin_unlock(&htlbpage_lock);
        set_page_count(page, 1);
        for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); ++i)
                clear_highpage(&page[i]);
        return page;
}
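
/*
 * Allocate (if necessary) and return the page-table entry that maps the
 * given huge-page virtual address.  The address is first rebased with
 * htlbpage_to_page() so the generic pgd/pmd/pte walkers can be used.
 */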
static pte_t *
huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
{
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, taddr);
        pmd = pmd_alloc(mm, pgd, taddr);
        if (pmd)
                pte = pte_alloc(mm, pmd, taddr);
        return pte;
}
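
/*
 * Look up the page-table entry for a huge-page address without allocating
 * any intermediate levels; returns NULL if no mapping structure exists yet.
 */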
static pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, taddr);
        if (pgd_present(*pgd)) {
                pmd = pmd_offset(pgd, taddr);
                if (pmd_present(*pmd))
                        pte = pte_offset(pmd, taddr);
        }
        return pte;
}
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
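
/*
 * Build a huge-page PTE for 'page' with protections derived from the VMA,
 * account it against mm->rss, and install it at 'page_table'.
 */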
static void
set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
              struct page *page, pte_t * page_table, int write_access)
{
        pte_t entry;

        mm->rss += (HPAGE_SIZE / PAGE_SIZE);
        if (write_access)
                entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        else
                entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        entry = pte_mkyoung(entry);
        mk_pte_huge(entry);
        set_pte(page_table, entry);
}
/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        if (REGION_NUMBER(addr) != REGION_HPAGE)
                return -EINVAL;
        return 0;
}
/*
 * This function checks whether the range [addr, addr+len) overlaps the
 * HugeTLB region.  It returns -EINVAL if any part of the address range
 * falls in the HugeTLB region.
 */
int is_invalid_hugepage_range(unsigned long addr, unsigned long len)
{
        if (REGION_NUMBER(addr) == REGION_HPAGE)
                return -EINVAL;
        if (REGION_NUMBER(addr+len) == REGION_HPAGE)
                return -EINVAL;
        return 0;
}
/*
 * Same as generic free_pgtables(), except constant PGDIR_* and pgd_offset
 * are hugetlb region specific.
 */
void hugetlb_free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
        unsigned long start, unsigned long end)
{
        unsigned long first = start & HUGETLB_PGDIR_MASK;
        unsigned long last = end + HUGETLB_PGDIR_SIZE - 1;
        unsigned long start_index, end_index;

        if (!prev) {
                prev = mm->mmap;
                if (!prev)
                        goto no_mmaps;
                if (prev->vm_end > start) {
                        if (last > prev->vm_start)
                                last = prev->vm_start;
                        goto no_mmaps;
                }
        }
        for (;;) {
                struct vm_area_struct *next = prev->vm_next;

                if (next) {
                        if (next->vm_start < start) {
                                prev = next;
                                continue;
                        }
                        if (last > next->vm_start)
                                last = next->vm_start;
                }
                if (prev->vm_end > first)
                        first = prev->vm_end + HUGETLB_PGDIR_SIZE - 1;
                break;
        }
no_mmaps:
        /*
         * If the PGD bits are not consecutive in the virtual address, the
         * old method of shifting the VA >> by PGDIR_SHIFT doesn't work.
         */
        start_index = pgd_index(htlbpage_to_page(first));
        end_index = pgd_index(htlbpage_to_page(last));
        if (end_index > start_index) {
                clear_page_tables(mm, start_index, end_index - start_index);
                flush_tlb_pgtables(mm, first & HUGETLB_PGDIR_MASK,
                        last & HUGETLB_PGDIR_MASK);
        }
}
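
/*
 * Called when duplicating an address space (fork): copy the parent's
 * huge-page PTEs into the child so both share the same huge pages, and
 * account the pages against the child's RSS.
 */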
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma)
{
        pte_t *src_pte, *dst_pte, entry;
        struct page *ptepage;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;

        while (addr < end) {
                dst_pte = huge_pte_alloc(dst, addr);
                if (!dst_pte)
                        goto nomem;
                src_pte = huge_pte_offset(src, addr);
                entry = *src_pte;
                ptepage = pte_page(entry);
                get_page(ptepage);
                set_pte(dst_pte, entry);
                dst->rss += (HPAGE_SIZE / PAGE_SIZE);
                addr += HPAGE_SIZE;
        }
        return 0;
nomem:
        return -ENOMEM;
}
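
/*
 * Back end for get_user_pages() on hugetlb VMAs: walk the huge-page PTEs
 * for the requested range and fill in the pages[]/vmas[] arrays one
 * PAGE_SIZE subpage at a time.
 */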
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                    struct page **pages, struct vm_area_struct **vmas,
                    unsigned long *st, int *length, int i)
{
        pte_t *ptep, pte;
        unsigned long start = *st;
        unsigned long pstart;
        int len = *length;
        struct page *page;

        do {
                pstart = start & HPAGE_MASK;
                ptep = huge_pte_offset(mm, start);
                pte = *ptep;
back1:
                page = pte_page(pte);
                if (pages) {
                        page += ((start & ~HPAGE_MASK) >> PAGE_SHIFT);
                        get_page(page);
                        pages[i] = page;
                }
                if (vmas)
                        vmas[i] = vma;
                i++;
                len--;
                start += PAGE_SIZE;
                if (((start & HPAGE_MASK) == pstart) && len &&
                        (start < vma->vm_end))
                        goto back1;
        } while (len && start < vma->vm_end);
        *length = len;
        *st = start;
        return i;
}
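
/*
 * Return a fully released huge page to the global free list.
 */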
void free_huge_page(struct page *page)
{
        BUG_ON(page_count(page));
        BUG_ON(page->mapping);

        INIT_LIST_HEAD(&page->list);

        spin_lock(&htlbpage_lock);
        list_add(&page->list, &htlbpage_freelist);
        htlbpagemem++;
        spin_unlock(&htlbpage_lock);
}
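
/*
 * Drop one reference on a huge page and free it when the count hits zero.
 */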
void huge_page_release(struct page *page)
{
        if (!put_page_testzero(page))
                return;

        free_huge_page(page);
}
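
/*
 * Tear down the huge-page PTEs in [start, end), releasing each mapped huge
 * page and flushing the TLB for the range.  Both boundaries must be
 * HPAGE_SIZE aligned.
 */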
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        struct page *page;

        BUG_ON(start & (HPAGE_SIZE - 1));
        BUG_ON(end & (HPAGE_SIZE - 1));

        for (address = start; address < end; address += HPAGE_SIZE) {
                pte = huge_pte_offset(mm, address);
                if (!pte || pte_none(*pte))
                        continue;
                page = pte_page(*pte);
                huge_page_release(page);
                pte_clear(pte);
        }
        mm->rss -= (end - start) >> PAGE_SHIFT;
        flush_tlb_range(mm, start, end);
}
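
/*
 * Locked wrapper around unmap_hugepage_range() for a (start, length) pair.
 */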
void zap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long length)
{
        struct mm_struct *mm = vma->vm_mm;

        spin_lock(&mm->page_table_lock);
        unmap_hugepage_range(vma, start, start + length);
        spin_unlock(&mm->page_table_lock);
}
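
/*
 * Populate every huge page of a hugetlbfs mapping up front at mmap() time:
 * look each page up in the page cache, allocate (and charge quota for) a
 * new huge page if needed, and install the huge PTE.
 */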
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
        struct mm_struct *mm = current->mm;
        struct inode *inode = mapping->host;
        unsigned long addr;
        int ret = 0;

        BUG_ON(vma->vm_start & ~HPAGE_MASK);
        BUG_ON(vma->vm_end & ~HPAGE_MASK);

        spin_lock(&mm->page_table_lock);
        for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
                unsigned long idx;
                struct page *page;
                pte_t *pte = huge_pte_alloc(mm, addr);

                if (!pte) {
                        ret = -ENOMEM;
                        goto out;
                }
                if (!pte_none(*pte))
                        continue;

                idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
                        + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
                page = find_get_page(mapping, idx);
                if (!page) {
                        /* charge the fs quota first */
                        if (hugetlb_get_quota(mapping)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        page = alloc_hugetlb_page();
                        if (!page) {
                                hugetlb_put_quota(mapping);
                                ret = -ENOMEM;
                                goto out;
                        }
                        add_to_page_cache(page, mapping, idx);
                        unlock_page(page);
                }
                set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
        }
out:
        spin_unlock(&mm->page_table_lock);
        return ret;
}
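
/*
 * Pick an unmapped, HPAGE_SIZE- and cache-color-aligned address inside the
 * dedicated huge-page region (REGION_HPAGE) for a new hugetlb mapping.
 */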
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;

        if (len > RGN_MAP_LIMIT)
                return -ENOMEM;
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        /* This code assumes that REGION_HPAGE != 0. */
        if ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1)))
                addr = TASK_HPAGE_BASE;
        else
                addr = COLOR_HALIGN(addr);
        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point: (!vmm || addr < vmm->vm_end). */
                if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
                        return -ENOMEM;
                if (!vmm || (addr + len) <= vmm->vm_start)
                        return addr;
                addr = COLOR_HALIGN(vmm->vm_end);
        }
}
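
/*
 * Strip the hugetlb state from a huge page's constituent frames and hand
 * the whole HUGETLB_PAGE_ORDER block back to the buddy allocator.
 */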
void update_and_free_page(struct page *page)
{
        int j;
        struct page *map;

        map = page;
        htlbzone_pages--;
        for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
                map->flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
                        1 << PG_dirty | 1 << PG_active | 1 << PG_reserved);
                set_page_count(map, 0);
                map++;
        }
        set_page_count(page, 1);
        __free_pages(page, HUGETLB_PAGE_ORDER);
}
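
/*
 * Used when shrinking the pool: walk the free list and release huge pages,
 * preferring pages that live in non-highmem zones.  Returns the updated
 * count of pages still to be freed.
 */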
int try_to_free_low(int count)
{
        struct list_head *p;
        struct page *page, *map;

        map = NULL;
        spin_lock(&htlbpage_lock);
        list_for_each(p, &htlbpage_freelist) {
                if (map) {
                        list_del(&map->list);
                        update_and_free_page(map);
                        htlbpagemem--;
                        map = NULL;
                        if (++count == 0)
                                break;
                }
                page = list_entry(p, struct page, list);
                if ((page_zone(page))->name[0] != 'H')  /* look for non-highmem zones */
                        map = page;
        }
        if (map) {
                list_del(&map->list);
                update_and_free_page(map);
                htlbpagemem--;
                count++;
        }
        spin_unlock(&htlbpage_lock);
        return count;
}
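
/*
 * Resize the huge-page pool to 'count' pages, growing it with fresh
 * allocations or shrinking it by freeing unused pages.  Returns the
 * resulting pool size.
 */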
int set_hugetlb_mem_size(int count)
{
        int j, lcount;
        struct page *page, *map;

        if (count < 0)
                lcount = count;
        else
                lcount = count - htlbzone_pages;

        if (lcount == 0)
                return (int)htlbzone_pages;
        if (lcount > 0) {       /* Increase the mem size. */
                while (lcount--) {
                        page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
                        if (page == NULL)
                                break;
                        map = page;
                        for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
                                SetPageReserved(map);
                                map++;
                        }
                        spin_lock(&htlbpage_lock);
                        list_add(&page->list, &htlbpage_freelist);
                        htlbpagemem++;
                        htlbzone_pages++;
                        spin_unlock(&htlbpage_lock);
                }
                return (int) htlbzone_pages;
        }
        /* Shrink the memory size. */
        lcount = try_to_free_low(lcount);
        while (lcount++ < 0) {
                page = alloc_hugetlb_page();
                if (page == NULL)
                        break;
                spin_lock(&htlbpage_lock);
                update_and_free_page(page);
                spin_unlock(&htlbpage_lock);
        }
        return (int) htlbzone_pages;
}
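
/*
 * sysctl handler for the huge-page pool size (registered elsewhere,
 * typically as vm.nr_hugepages): read/write htlbpage_max and resize the
 * pool to match.
 */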
int hugetlb_sysctl_handler(ctl_table *table, int write, struct file *file, void *buffer, size_t *length)
{
        proc_dointvec(table, write, file, buffer, length);
        htlbpage_max = set_hugetlb_mem_size(htlbpage_max);
        return 0;
}
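
/*
 * Parse the "hugepages=N" boot parameter, which sets the initial pool size
 * requested by hugetlb_init().
 */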
static int __init hugetlb_setup(char *s)
{
        if (sscanf(s, "%d", &htlbpage_max) <= 0)
                htlbpage_max = 0;
        return 1;
}
__setup("hugepages=", hugetlb_setup);
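
/*
 * Boot-time initialization: reserve up to htlbpage_max huge pages from the
 * buddy allocator and seed the free list with them.
 */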
static int __init hugetlb_init(void)
{
        int i, j;
        struct page *page;

        for (i = 0; i < htlbpage_max; ++i) {
                page = alloc_pages(__GFP_HIGHMEM, HUGETLB_PAGE_ORDER);
                if (!page)
                        break;
                for (j = 0; j < HPAGE_SIZE/PAGE_SIZE; ++j)
                        SetPageReserved(&page[j]);
                spin_lock(&htlbpage_lock);
                list_add(&page->list, &htlbpage_freelist);
                spin_unlock(&htlbpage_lock);
        }
        htlbpage_max = htlbpagemem = htlbzone_pages = i;
        printk("Total HugeTLB memory allocated, %ld\n", htlbpagemem);
        return 0;
}
module_init(hugetlb_init);
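
/*
 * Report pool statistics (for /proc/meminfo).
 */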
int hugetlb_report_meminfo(char *buf)
{
        return sprintf(buf,
                        "HugePages_Total: %5lu\n"
                        "HugePages_Free:  %5lu\n"
                        "Hugepagesize:    %5lu kB\n",
                        htlbzone_pages,
                        htlbpagemem,
                        HPAGE_SIZE/1024);
}
int is_hugepage_mem_enough(size_t size)
{
        if (size > (htlbpagemem << HPAGE_SHIFT))
                return 0;
        return 1;
}
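
/*
 * Huge pages are always prefaulted at mmap() time, so the fault handler
 * should never run; treat any call as a bug.
 */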
static struct page *hugetlb_nopage(struct vm_area_struct * area, unsigned long address, int unused)
{
        BUG();
        return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
        .nopage = hugetlb_nopage,
};