bool need_rmap_locks)
{
unsigned long extent, next, old_end;
+ struct mmu_notifier_range range;
pmd_t *old_pmd, *new_pmd;
- unsigned long mmun_start; /* For mmu_notifiers */
- unsigned long mmun_end; /* For mmu_notifiers */
old_end = old_addr + len;
flush_cache_range(vma, old_addr, old_end);
- mmun_start = old_addr;
- mmun_end = old_end;
- mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);
+ mmu_notifier_range_init(&range, vma->vm_mm, old_addr, old_end);
+ mmu_notifier_invalidate_range_start(&range);
for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
cond_resched();
new_pmd, new_addr, need_rmap_locks);
}
- mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
+ mmu_notifier_invalidate_range_end(&range);
return len + old_addr - old_end; /* how much done */
}
unsigned long ret = -EINVAL;
unsigned long charged = 0;
bool locked = false;
+ bool downgraded = false;
struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
LIST_HEAD(uf_unmap_early);
LIST_HEAD(uf_unmap);
/*
* Always allow a shrinking remap: that just unmaps
* the unnecessary pages..
- * do_munmap does all the needed commit accounting
+ * __do_munmap does all the needed commit accounting, and
+ * downgrades mmap_sem to read if so directed.
*/
if (old_len >= new_len) {
- ret = do_munmap(mm, addr+new_len, old_len - new_len, &uf_unmap);
- if (ret && old_len != new_len)
+ int retval;
+
+ retval = __do_munmap(mm, addr+new_len, old_len - new_len,
+ &uf_unmap, true);
+ if (retval < 0 && old_len != new_len) {
+ ret = retval;
goto out;
+ /* Returning 1 indicates mmap_sem is downgraded to read. */
+ } else if (retval == 1)
+ downgraded = true;
ret = addr;
goto out;
}
vm_unacct_memory(charged);
locked = 0;
}
- up_write(&current->mm->mmap_sem);
+ if (downgraded)
+ up_read(&current->mm->mmap_sem);
+ else
+ up_write(&current->mm->mmap_sem);
if (locked && new_len > old_len)
mm_populate(new_addr + old_len, new_len - old_len);
userfaultfd_unmap_complete(mm, &uf_unmap_early);