/* Empty ops table: no open/close/fault hooks are installed for generic files. */
struct vm_operations_struct generic_file_vm_ops = {
};
/* Patch adds: export the vmalloc family so loadable modules can use it. */
+EXPORT_SYMBOL(vmalloc);
+EXPORT_SYMBOL(vfree);
+EXPORT_SYMBOL(vmalloc_to_page);
+EXPORT_SYMBOL(vmalloc_32);
+
/*
* Handle all mappings that got truncated by a "truncate()"
* system call.
return(i);
}
/* Patch adds: export get_user_pages for module use as well. */
+EXPORT_SYMBOL(get_user_pages);
+
/* NOTE(review): vmlist_lock presumably serializes access to vmlist below —
 * the users are outside this hunk, so confirm against the full file. */
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
kfree(addr);
}
-void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask,
- pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
/*
* kmalloc doesn't like __GFP_HIGHMEM for some reason
error_getting_vma:
up_write(&nommu_vma_sem);
kfree(vml);
- printk("Allocation of vml for %lu byte allocation from process %d failed\n",
+ printk("Allocation of vma for %lu byte allocation from process %d failed\n",
len, current->pid);
show_free_areas();
return -ENOMEM;
for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next)
if ((*parent)->vma->vm_start == addr &&
- (*parent)->vma->vm_end == end)
+ ((len == 0) || ((*parent)->vma->vm_end == end)))
goto found;
printk("munmap of non-mmaped memory by process %d (%s): %p\n",
/*
 * Patch changes remap_pfn_range from unconditionally refusing with -EPERM
 * to recording the linear start address implied by vm_pgoff and reporting
 * success. NOTE(review): `from`, `to`, `size` and `prot` are ignored here —
 * presumably fine on nommu where mappings are direct; confirm callers.
 */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long to, unsigned long size, pgprot_t prot)
{
-	return -EPERM;
+	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
+	return 0;
}
void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
return -ENOMEM;
}
/*
 * Patch updates arch_unmap_area to the new (mm, addr) prototype.
 * The body is intentionally empty: there is no per-arch teardown to do here.
 */
-void arch_unmap_area(struct vm_area_struct *area)
+void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}
void update_mem_hiwater(struct task_struct *tsk)
{
- unsigned long rss = get_mm_counter(tsk->mm, rss);
+ unsigned long rss;
if (likely(tsk->mm)) {
+ rss = get_mm_counter(tsk->mm, rss);
if (tsk->mm->hiwater_rss < rss)
tsk->mm->hiwater_rss = rss;
if (tsk->mm->hiwater_vm < tsk->mm->total_vm)
leave 3% of the size of this process for other processes */
allowed -= current->mm->total_vm / 32;
- if (atomic_read(&vm_committed_space) < allowed)
+ /*
+ * cast `allowed' as a signed long because vm_committed_space
+ * sometimes has a negative value
+ */
+ if (atomic_read(&vm_committed_space) < (long)allowed)
return 0;
vm_unacct_memory(pages);