return err;
}
-struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
- unsigned long start, unsigned long end, int node)
+static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
+ unsigned long start, unsigned long end,
+ int node, gfp_t gfp_mask)
{
struct vm_struct **p, *tmp, *area;
unsigned long align = 1;
unsigned long addr;
+ BUG_ON(in_interrupt());
if (flags & VM_IOREMAP) {
int bit = fls(size);
}
addr = ALIGN(start, align);
size = PAGE_ALIGN(size);
-
- area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
- if (unlikely(!area))
+ if (unlikely(!size))
return NULL;
- if (unlikely(!size)) {
- kfree (area);
+ area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
+ if (unlikely(!area))
return NULL;
- }
/*
* We always allocate a guard page.
*/
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
unsigned long start, unsigned long end)
{
- return __get_vm_area_node(size, flags, start, end, -1);
+ return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}
/**
return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
-struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
+struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
+ int node, gfp_t gfp_mask)
{
- return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
+ return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
+ gfp_mask);
}
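With the new gfp_t parameter, any caller of get_vm_area_node() needs a one-line update. Inside __get_vm_area_node() the mask is filtered through GFP_LEVEL_MASK before the internal kmalloc_node(), so zone modifiers such as __GFP_HIGHMEM, which slab cannot honour, are stripped rather than forwarded. A minimal sketch of an updated caller, with hypothetical names; passing GFP_KERNEL preserves the old behaviour:

	/* Hypothetical caller: reserve a kernel virtual area on a node,
	 * forwarding allocation constraints. Previously this was
	 * get_vm_area_node(size, VM_ALLOC, node). */
	static struct vm_struct *my_reserve(unsigned long size, int node)
	{
		return get_vm_area_node(size, VM_ALLOC, node, GFP_KERNEL);
	}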
/* Caller must hold vmlist_lock */
if (array_size > PAGE_SIZE) {
pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
area->flags |= VM_VPAGES;
- } else
- pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
+ } else {
+ pages = kmalloc_node(array_size,
+ (gfp_mask & GFP_LEVEL_MASK),
+ node);
+ }
area->pages = pages;
if (!area->pages) {
remove_vm_area(area->addr);
if (!size || (size >> PAGE_SHIFT) > num_physpages)
return NULL;
- area = get_vm_area_node(size, VM_ALLOC, node);
+ area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
if (!area)
return NULL;
void *ret;
ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
- write_lock(&vmlist_lock);
- area = __find_vm_area(ret);
- area->flags |= VM_USERMAP;
- write_unlock(&vmlist_lock);
-
+ if (ret) {
+ write_lock(&vmlist_lock);
+ area = __find_vm_area(ret);
+ area->flags |= VM_USERMAP;
+ write_unlock(&vmlist_lock);
+ }
return ret;
}
EXPORT_SYMBOL(vmalloc_user);
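The new NULL check matters: if __vmalloc() fails, __find_vm_area(NULL) returns NULL and the old unconditional area->flags dereference would oops. With the fix, callers see a plain NULL on failure; VM_USERMAP is what later entitles the buffer to remap_vmalloc_range(). A minimal usage sketch (hypothetical caller):

	/* Hypothetical caller: a failed allocation now comes back as
	 * NULL instead of oopsing inside vmalloc_user(). */
	void *buf = vmalloc_user(16 * PAGE_SIZE);
	if (!buf)
		return -ENOMEM;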
return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}
+#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
+#define GFP_VMALLOC32 GFP_DMA32
+#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
+#define GFP_VMALLOC32 GFP_DMA
+#else
+#define GFP_VMALLOC32 GFP_KERNEL
+#endif
+
/**
* vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
* @size: allocation size
*/
void *vmalloc_32(unsigned long size)
{
- return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
+ return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
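GFP_VMALLOC32 spells out the fallback chain for "32-bit addressable": on 64-bit kernels GFP_KERNEL pages may live above 4GB, so ZONE_DMA32 is preferred, then ZONE_DMA; on 32-bit kernels plain GFP_KERNEL already qualifies. A minimal sketch of the intended use (hypothetical device constraint):

	/* Hypothetical: a descriptor table for a device that can only
	 * address 32-bit physical memory; the backing pages now come
	 * from a below-4GB zone where the kernel provides one. */
	u32 *tbl = vmalloc_32(nents * sizeof(*tbl));
	if (!tbl)
		return -ENOMEM;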
struct vm_struct *area;
void *ret;
- ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
- write_lock(&vmlist_lock);
- area = __find_vm_area(ret);
- area->flags |= VM_USERMAP;
- write_unlock(&vmlist_lock);
-
+ ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
+ if (ret) {
+ write_lock(&vmlist_lock);
+ area = __find_vm_area(ret);
+ area->flags |= VM_USERMAP;
+ write_unlock(&vmlist_lock);
+ }
return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);
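vmalloc_32_user() picks up the same two fixes: the NULL check before __find_vm_area(), and 32-bit addressable backing pages via GFP_VMALLOC32. Together with remap_vmalloc_range(), documented below, it supports a simple mmap handler; a sketch with hypothetical names:

	/* Hypothetical file_operations->mmap: hand a vmalloc_32_user()
	 * buffer to userspace. remap_vmalloc_range() accepts only areas
	 * carrying VM_USERMAP, which vmalloc_32_user() sets. */
	static int my_mmap(struct file *file, struct vm_area_struct *vma)
	{
		void *buf = file->private_data; /* from vmalloc_32_user() */

		return remap_vmalloc_range(vma, buf, 0);
	}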
* that it is big enough to cover the vma. Will return failure if
* that criterion isn't met.
*
- * Similar to remap_pfn_range (see mm/memory.c)
+ * Similar to remap_pfn_range() (see mm/memory.c)
*/
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff)