/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 */
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/pagemap.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

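/*
 * A VMA has a single set of flags for every page it covers, so
 * mlocking only part of a VMA means splitting it.  The fixup helpers
 * below cover the four possible cases: the whole VMA, a prefix, a
 * suffix, or a range strictly inside it.
 */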
static inline int mlock_fixup_all(struct vm_area_struct * vma, int newflags)
{
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_flags = newflags;
	spin_unlock(&vma->vm_mm->page_table_lock);
	return 0;
}

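/*
 * Lock/unlock only the head of a VMA: a new VMA "n" takes over
 * [vm_start, end) with the new flags, and the original VMA is shrunk
 * to start at "end".
 */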
static inline int mlock_fixup_start(struct vm_area_struct * vma,
	unsigned long end, int newflags)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -EAGAIN;
	*n = *vma;
	n->vm_end = end;
	n->vm_flags = newflags;
	n->vm_raend = 0;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_start = end;
	__insert_vm_struct(current->mm, n);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);
	return 0;
}

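/*
 * Mirror image of mlock_fixup_start: the new VMA takes over the tail
 * [start, vm_end) with the new flags, and the original VMA is
 * truncated to end at "start".
 */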
static inline int mlock_fixup_end(struct vm_area_struct * vma,
	unsigned long start, int newflags)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -EAGAIN;
	*n = *vma;
	n->vm_start = start;
	n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
	n->vm_flags = newflags;
	n->vm_raend = 0;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_end = start;
	__insert_vm_struct(current->mm, n);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);
	return 0;
}

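/*
 * A range strictly inside a VMA splits it in three: "left" and
 * "right" keep the old flags, while the original VMA is narrowed to
 * [start, end) and given the new flags.  Note the single
 * atomic_add(2, ...) that takes one file reference for each of the
 * two new VMAs.
 */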
static inline int mlock_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int newflags)
{
	struct vm_area_struct * left, * right;

	left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!left)
		return -EAGAIN;
	right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!right) {
		kmem_cache_free(vm_area_cachep, left);
		return -EAGAIN;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	right->vm_start = end;
	right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
	vma->vm_flags = newflags;
	left->vm_raend = 0;
	right->vm_raend = 0;
	if (vma->vm_file)
		atomic_add(2, &vma->vm_file->f_count);

	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	vma->vm_raend = 0;
	vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
	lock_vma_mappings(vma);
	spin_lock(&vma->vm_mm->page_table_lock);
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_flags = newflags;
	__insert_vm_struct(current->mm, left);
	__insert_vm_struct(current->mm, right);
	spin_unlock(&vma->vm_mm->page_table_lock);
	unlock_vma_mappings(vma);
	return 0;
}

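/*
 * Dispatch to the appropriate fixup helper and, on success, update
 * the locked-page accounting.  Locking a range also pre-faults it
 * with make_pages_present(), so the pages are resident on return.
 */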
static int mlock_fixup(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	int pages, retval;

	if (newflags == vma->vm_flags)
		return 0;

	if (start == vma->vm_start) {
		if (end == vma->vm_end)
			retval = mlock_fixup_all(vma, newflags);
		else
			retval = mlock_fixup_start(vma, end, newflags);
	} else {
		if (end == vma->vm_end)
			retval = mlock_fixup_end(vma, start, newflags);
		else
			retval = mlock_fixup_middle(vma, start, end, newflags);
	}
	if (!retval) {
		/* keep track of amount of locked VM */
		pages = (end - start) >> PAGE_SHIFT;
		if (newflags & VM_LOCKED) {
			pages = -pages;
			make_pages_present(start, end);
		}
		vma->vm_mm->locked_vm -= pages;
	}
	return retval;
}

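/*
 * Apply mlock_fixup() to every VMA intersecting [start, start+len).
 * The walk fails with -ENOMEM if the range contains a hole, i.e. an
 * unmapped gap between two VMAs.
 */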
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * next;
	int error;

	if (on && !capable(CAP_IPC_LOCK))
		return -EPERM;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		if (vma->vm_end >= end) {
			error = mlock_fixup(vma, nstart, end, newflags);
			break;
		}

		tmp = vma->vm_end;
		next = vma->vm_next;
		error = mlock_fixup(vma, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		vma = next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

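/*
 * Userspace sketch (illustrative only, not part of this file): a
 * process with CAP_IPC_LOCK can pin a buffer so it cannot be paged
 * out:
 *
 *	static char buf[8192];
 *	if (mlock(buf, sizeof(buf)) == 0) {
 *		... use buf without major faults ...
 *		munlock(buf, sizeof(buf));
 *	}
 *
 * The kernel rounds the range outward to page boundaries, so locking
 * a single byte pins the whole page containing it.
 */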
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if (locked > lock_limit)
		goto out;

	/* we may lock at most half of physical memory... */
	/* (this check is pretty bogus, but doesn't hurt) */
	if (locked > num_physpages/2)
		goto out;

	error = do_mlock(start, len, 1);
out:
	up_write(&current->mm->mmap_sem);
	return error;
}

asmlinkage long sys_munlock(unsigned long start, size_t len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

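/*
 * do_mlockall() handles both mlockall() and munlockall().  MCL_FUTURE
 * sets VM_LOCKED in mm->def_flags so that future mappings are born
 * locked; MCL_CURRENT adds VM_LOCKED to every existing VMA.  With no
 * flags, both effects are undone.
 */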
static int do_mlockall(int flags)
{
	int error;
	unsigned int def_flags;
	struct vm_area_struct * vma;

	if (!capable(CAP_IPC_LOCK))
		return -EPERM;

	def_flags = 0;
	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;

	error = 0;
	for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;
		error = mlock_fixup(vma, vma->vm_start, vma->vm_end, newflags);
		if (error)
			break;
	}
	return error;
}

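/*
 * Illustrative userspace sketch (not part of this file): a realtime
 * process typically calls
 *
 *	mlockall(MCL_CURRENT | MCL_FUTURE);
 *
 * early in startup so that neither its existing nor its future
 * mappings can be paged out.
 */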
asmlinkage long sys_mlockall(int flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	down_write(&current->mm->mmap_sem);
	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	lock_limit = current->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (current->mm->total_vm > lock_limit)
		goto out;

	/* we may lock at most half of physical memory... */
	/* (this check is pretty bogus, but doesn't hurt) */
	if (current->mm->total_vm > num_physpages/2)
		goto out;

	ret = do_mlockall(flags);
out:
	up_write(&current->mm->mmap_sem);
	return ret;
}

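/*
 * munlockall() is mlockall() with no flags: def_flags is cleared and
 * VM_LOCKED is removed from every VMA.
 */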
asmlinkage long sys_munlockall(void)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}