Slab allocators: remove useless __GFP_NO_GROW flag
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 6d381df..cb5aabd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -181,15 +181,12 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
        }
        addr = ALIGN(start, align);
        size = PAGE_ALIGN(size);
-
-       area = kmalloc_node(sizeof(*area), gfp_mask, node);
-       if (unlikely(!area))
+       if (unlikely(!size))
                return NULL;
 
-       if (unlikely(!size)) {
-               kfree (area);
+       area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
+       if (unlikely(!area))
                return NULL;
-       }
 
        /*
         * We always allocate a guard page.
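This first hunk reorders __get_vm_area_node(): the zero-size check now runs before the descriptor allocation, so the error path no longer needs a kfree(), and the caller's gfp_mask is filtered through GFP_LEVEL_MASK before it reaches kmalloc_node(). A minimal userspace sketch of the reordering, not kernel code; struct area and make_area() are illustrative names only:

/*
 * Check the cheap precondition first, then allocate, so the failure
 * path no longer needs a matching free().
 */
#include <stdio.h>
#include <stdlib.h>

struct area { size_t size; };

static struct area *make_area(size_t size)
{
	if (size == 0)			/* check before allocating */
		return NULL;

	struct area *a = malloc(sizeof(*a));
	if (!a)				/* the only failure exit left */
		return NULL;

	a->size = size;
	return a;
}

int main(void)
{
	printf("%p %p\n", (void *)make_area(0), (void *)make_area(4096));
	return 0;
}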
@@ -434,7 +431,7 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                area->flags |= VM_VPAGES;
        } else {
                pages = kmalloc_node(array_size,
-                               (gfp_mask & ~(__GFP_HIGHMEM | __GFP_ZERO)),
+                               (gfp_mask & GFP_LEVEL_MASK),
                                node);
        }
        area->pages = pages;
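Both GFP_LEVEL_MASK hunks trade an open-coded blacklist, which cleared only __GFP_HIGHMEM and __GFP_ZERO by hand, for the shared whitelist of bits that are safe to pass to the slab allocator. The sketch below uses made-up flag values, not the real gfp bits, to show why the whitelist is more robust: a later-added unsafe bit slips through the blacklist but is dropped by the mask.

/*
 * A maintained whitelist (GFP_LEVEL_MASK in this kernel) automatically
 * drops any later-added page-allocator-only bit, while the old
 * open-coded blacklist passes bits it was never taught about.
 */
#include <stdio.h>

#define F_WAIT    0x01u		/* hypothetical stand-ins for gfp bits */
#define F_HIGHMEM 0x04u
#define F_ZERO    0x08u
#define F_UNSAFE  0x10u		/* a bit added after the blacklist was written */

#define LEVEL_MASK F_WAIT	/* whitelist: the bits safe for kmalloc */

int main(void)
{
	unsigned int caller = F_WAIT | F_HIGHMEM | F_ZERO | F_UNSAFE;

	printf("blacklist keeps: %#x\n", caller & ~(F_HIGHMEM | F_ZERO));
	printf("whitelist keeps: %#x\n", caller & LEVEL_MASK);
	return 0;
}

The first line prints 0x11, leaking the new bit into the slab call; the second prints 0x1.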
@@ -532,11 +529,12 @@ void *vmalloc_user(unsigned long size)
        void *ret;
 
        ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-       write_lock(&vmlist_lock);
-       area = __find_vm_area(ret);
-       area->flags |= VM_USERMAP;
-       write_unlock(&vmlist_lock);
-
+       if (ret) {
+               write_lock(&vmlist_lock);
+               area = __find_vm_area(ret);
+               area->flags |= VM_USERMAP;
+               write_unlock(&vmlist_lock);
+       }
        return ret;
 }
 EXPORT_SYMBOL(vmalloc_user);
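__vmalloc() can fail, and the old vmalloc_user() passed its result straight to __find_vm_area(), so a failed allocation meant a NULL dereference under vmlist_lock. The hunk wraps the bookkeeping in if (ret); the same guard is applied to vmalloc_32_user() further down. A hedged userspace sketch of the shape of the fix, where find_meta() is a hypothetical stand-in for __find_vm_area() and the flag mimics VM_USERMAP only in spirit:

#include <stdlib.h>

struct meta { int flags; };

static struct meta *find_meta(void *addr)
{
	return (struct meta *)addr;	/* stand-in lookup; NULL stays NULL */
}

static void *alloc_user_buffer(size_t size)
{
	size_t n = size < sizeof(struct meta) ? sizeof(struct meta) : size;
	void *ret = calloc(1, n);

	if (ret) {	/* without this, find_meta(NULL)->flags would fault */
		struct meta *m = find_meta(ret);
		m->flags |= 0x1;	/* mark user-mappable, like VM_USERMAP */
	}
	return ret;
}

int main(void)
{
	free(alloc_user_buffer(64));
	return 0;
}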
@@ -579,6 +577,14 @@ void *vmalloc_exec(unsigned long size)
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
 }
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
+#define GFP_VMALLOC32 GFP_DMA32
+#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
+#define GFP_VMALLOC32 GFP_DMA
+#else
+#define GFP_VMALLOC32 GFP_KERNEL
+#endif
+
 /**
  *     vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
  *     @size:          allocation size
@@ -588,7 +594,7 @@ void *vmalloc_exec(unsigned long size)
  */
 void *vmalloc_32(unsigned long size)
 {
-       return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
+       return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc_32);
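GFP_VMALLOC32 makes the "32bit addressable" promise of vmalloc_32() hold on 64-bit kernels, where a plain GFP_KERNEL page may live above 4 GiB: it picks ZONE_DMA32 when available, falls back to ZONE_DMA, and keeps GFP_KERNEL on 32-bit builds, where GFP_KERNEL lowmem is already below the boundary. A runnable sketch of the same compile-time ladder, with hypothetical HAVE_ZONE_* macros standing in for the kernel's CONFIG_* symbols:

#include <stdint.h>
#include <stdio.h>

#if defined(HAVE_ZONE_DMA32)
#define POOL "DMA32"		/* pages guaranteed below 4 GiB */
#elif defined(HAVE_ZONE_DMA)
#define POOL "DMA"		/* smaller pool, also below 4 GiB */
#else
#define POOL "KERNEL"		/* e.g. 32-bit build: lowmem already qualifies */
#endif

static int addressable_32bit(uint64_t phys)
{
	return phys < (UINT64_C(1) << 32);	/* the 4 GiB boundary */
}

int main(void)
{
	printf("pool: %s\n", POOL);
	printf("0x80000000  -> %d\n", addressable_32bit(UINT64_C(0x80000000)));
	printf("0x140000000 -> %d\n", addressable_32bit(UINT64_C(0x140000000)));
	return 0;
}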
 
@@ -604,12 +610,13 @@ void *vmalloc_32_user(unsigned long size)
        struct vm_struct *area;
        void *ret;
 
-       ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
-       write_lock(&vmlist_lock);
-       area = __find_vm_area(ret);
-       area->flags |= VM_USERMAP;
-       write_unlock(&vmlist_lock);
-
+       ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
+       if (ret) {
+               write_lock(&vmlist_lock);
+               area = __find_vm_area(ret);
+               area->flags |= VM_USERMAP;
+               write_unlock(&vmlist_lock);
+       }
        return ret;
 }
 EXPORT_SYMBOL(vmalloc_32_user);
@@ -700,7 +707,7 @@ finished:
  *     that it is big enough to cover the vma. Will return failure if
  *     that criteria isn't met.
  *
- *     Similar to remap_pfn_range (see mm/memory.c)
+ *     Similar to remap_pfn_range() (see mm/memory.c)
  */
 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                                                unsigned long pgoff)