[PATCH] hugetlb: allocate huge pages according to the NUMA memory policy
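
The huge page allocator so far ignored the NUMA memory policy of the
faulting process: dequeue_huge_page() tried the local node and then
scanned all nodes in numerical order for a free huge page.  Make it
walk the zonelist returned by huge_zonelist(vma, address) from the
mempolicy layer instead, taking the first node on that list with a
free huge page, so that allocations respect policies such as
MPOL_BIND and MPOL_INTERLEAVE for hugetlbfs mappings as well.

alloc_huge_page() and dequeue_huge_page() grow vma/address parameters
so the policy can be looked up at fault time.  The call site in
set_max_huge_pages() passes (NULL, 0), which makes the policy lookup
fall back to the default policy.

While at it, fold find_or_alloc_huge_page() into its only caller,
hugetlb_no_page(); apart from the new alloc_huge_page() arguments its
logic is unchanged.

---
Note: huge_zonelist() itself lives in the mempolicy layer.  The sketch
below is illustrative only -- reconstructed from the mempolicy
internals of this kernel generation (get_vma_policy(),
interleave_nid(), zonelist_policy()), not copied from the companion
mm/mempolicy.c change:

	/*
	 * Pick the zonelist the huge page allocator should walk,
	 * honoring the memory policy in effect for @vma at @addr.
	 * @vma may be NULL (set_max_huge_pages() passes NULL/0); the
	 * lookup then falls back to the task/system default policy.
	 */
	struct zonelist *huge_zonelist(struct vm_area_struct *vma,
					unsigned long addr)
	{
		struct mempolicy *pol = get_vma_policy(current, vma, addr);

		if (pol->policy == MPOL_INTERLEAVE) {
			/* Spread huge pages across the interleave set,
			 * keyed by the huge-page-aligned offset. */
			unsigned nid = interleave_nid(pol, vma, addr,
						      HPAGE_SHIFT);

			return NODE_DATA(nid)->node_zonelists +
						gfp_zone(GFP_HIGHUSER);
		}
		/* default/preferred/bind: use the policy's zonelist */
		return zonelist_policy(GFP_HIGHUSER, pol);
	}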
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index da8a211..f4c43d7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -11,6 +11,8 @@
 #include <linux/highmem.h>
 #include <linux/nodemask.h>
 #include <linux/pagemap.h>
+#include <linux/mempolicy.h>
+
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
@@ -36,18 +38,21 @@ static void enqueue_huge_page(struct page *page)
        free_huge_pages_node[nid]++;
 }
 
-static struct page *dequeue_huge_page(void)
+static struct page *dequeue_huge_page(struct vm_area_struct *vma,
+                               unsigned long address)
 {
        int nid = numa_node_id();
        struct page *page = NULL;
+       struct zonelist *zonelist = huge_zonelist(vma, address);
+       struct zone **z;
 
-       if (list_empty(&hugepage_freelists[nid])) {
-               for (nid = 0; nid < MAX_NUMNODES; ++nid)
-                       if (!list_empty(&hugepage_freelists[nid]))
-                               break;
+       for (z = zonelist->zones; *z; z++) {
+               nid = (*z)->zone_pgdat->node_id;
+               if (!list_empty(&hugepage_freelists[nid]))
+                       break;
        }
-       if (nid >= 0 && nid < MAX_NUMNODES &&
-           !list_empty(&hugepage_freelists[nid])) {
+
+       if (*z) {
                page = list_entry(hugepage_freelists[nid].next,
                                  struct page, lru);
                list_del(&page->lru);
@@ -85,13 +90,13 @@ void free_huge_page(struct page *page)
        spin_unlock(&hugetlb_lock);
 }
 
-struct page *alloc_huge_page(void)
+struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
 {
        struct page *page;
        int i;
 
        spin_lock(&hugetlb_lock);
-       page = dequeue_huge_page();
+       page = dequeue_huge_page(vma, addr);
        if (!page) {
                spin_unlock(&hugetlb_lock);
                return NULL;
@@ -194,7 +199,7 @@ static unsigned long set_max_huge_pages(unsigned long count)
        spin_lock(&hugetlb_lock);
        try_to_free_low(count);
        while (count < nr_huge_pages) {
-               struct page *page = dequeue_huge_page();
+               struct page *page = dequeue_huge_page(NULL, 0);
                if (!page)
                        break;
                update_and_free_page(page);
@@ -363,42 +368,6 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
        flush_tlb_range(vma, start, end);
 }
 
-static struct page *find_or_alloc_huge_page(struct address_space *mapping,
-                               unsigned long idx, int shared)
-{
-       struct page *page;
-       int err;
-
-retry:
-       page = find_lock_page(mapping, idx);
-       if (page)
-               goto out;
-
-       if (hugetlb_get_quota(mapping))
-               goto out;
-       page = alloc_huge_page();
-       if (!page) {
-               hugetlb_put_quota(mapping);
-               goto out;
-       }
-
-       if (shared) {
-               err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
-               if (err) {
-                       put_page(page);
-                       hugetlb_put_quota(mapping);
-                       if (err == -EEXIST)
-                               goto retry;
-                       page = NULL;
-               }
-       } else {
-               /* Caller expects a locked page */
-               lock_page(page);
-       }
-out:
-       return page;
-}
-
 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, pte_t *ptep, pte_t pte)
 {
@@ -416,7 +385,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        page_cache_get(old_page);
-       new_page = alloc_huge_page();
+       new_page = alloc_huge_page(vma, address);
 
        if (!new_page) {
                page_cache_release(old_page);
@@ -465,12 +434,31 @@ int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * Use page lock to guard against racing truncation
         * before we get page_table_lock.
         */
-       page = find_or_alloc_huge_page(mapping, idx,
-                       vma->vm_flags & VM_SHARED);
-       if (!page)
-               goto out;
+retry:
+       page = find_lock_page(mapping, idx);
+       if (!page) {
+               if (hugetlb_get_quota(mapping))
+                       goto out;
+               page = alloc_huge_page(vma, address);
+               if (!page) {
+                       hugetlb_put_quota(mapping);
+                       goto out;
+               }
 
-       BUG_ON(!PageLocked(page));
+               if (vma->vm_flags & VM_SHARED) {
+                       int err;
+
+                       err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+                       if (err) {
+                               put_page(page);
+                               hugetlb_put_quota(mapping);
+                               if (err == -EEXIST)
+                                       goto retry;
+                               goto out;
+                       }
+               } else
+                       lock_page(page);
+       }
 
        spin_lock(&mm->page_table_lock);
        size = i_size_read(mapping->host) >> HPAGE_SHIFT;