/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct * vmlist;

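/*
 * All vm areas live on the singly-linked vmlist, kept sorted by start
 * address and protected by vmlist_lock: writers (get_vm_area, vfree)
 * take it for writing, vread/vwrite only for reading. The helpers below
 * tear down the kernel page-table entries backing a vmalloc'd area:
 * free_area_pte() frees the pages mapped by one pte range,
 * free_area_pmd() walks the pmds of one pgd entry, and
 * vmfree_area_pages() drives the walk across the whole range.
 */
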
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t page;
		page = ptep_get_and_clear(pte);
		address += PAGE_SIZE;
		pte++;
		if (pte_none(page))
			continue;
		if (pte_present(page)) {
			struct page *ptpage = pte_page(page);
			if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
				__free_page(ptpage);
			continue;
		}
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (address < end);
}

static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		free_area_pte(pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}

void vmfree_area_pages(unsigned long address, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = address + size;

	dir = pgd_offset_k(address);
	flush_cache_all();
	do {
		free_area_pmd(dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
}

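/*
 * Note the locking dance in alloc_area_pte() below: init_mm.page_table_lock
 * is dropped around alloc_page(), because the allocation may sleep and
 * sleeping with a spinlock held is not allowed. The pte is re-checked
 * once the lock is retaken, hence the "page already exists" complaint.
 */
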
static inline int alloc_area_pte (pte_t * pte, unsigned long address,
			unsigned long size, int gfp_mask, pgprot_t prot)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		struct page * page;

		/* alloc_page() may sleep: drop the lock around it */
		spin_unlock(&init_mm.page_table_lock);
		page = alloc_page(gfp_mask);
		spin_lock(&init_mm.page_table_lock);
		if (!pte_none(*pte))
			printk(KERN_ERR "alloc_area_pte: page already exists\n");
		if (!page)
			return -ENOMEM;
		set_pte(pte, mk_pte(page, prot));
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
	return 0;
}

static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, int gfp_mask, pgprot_t prot)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		if (alloc_area_pte(pte, address, end - address, gfp_mask, prot))
			return -ENOMEM;
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}

inline int vmalloc_area_pages (unsigned long address, unsigned long size,
			       int gfp_mask, pgprot_t prot)
{
	pgd_t * dir;
	unsigned long end = address + size;
	int ret;

	dir = pgd_offset_k(address);
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(&init_mm, dir, address);
		ret = -ENOMEM;
		if (!pmd)
			break;

		ret = -ENOMEM;
		if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot))
			break;

		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;

		ret = 0;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_cache_all();
	return ret;
}

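/*
 * get_vm_area() below carves a range out of [VMALLOC_START, VMALLOC_END)
 * with a first-fit scan of the address-sorted vmlist. An extra PAGE_SIZE
 * is added to each request so that an unmapped guard page separates the
 * areas: a sequential overrun then faults instead of silently corrupting
 * the neighbouring area.
 */
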
struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
{
	unsigned long addr;
	struct vm_struct **p, *tmp, *area;

	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;
	size += PAGE_SIZE;	/* room for the guard page */
	addr = VMALLOC_START;
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long) tmp->addr)
			break;
		addr = tmp->size + (unsigned long) tmp->addr;
		if (addr > VMALLOC_END-size)
			goto out;
	}
	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);
	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	return NULL;
}

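/*
 * Note that vfree() below keeps vmlist_lock held for writing across
 * vmfree_area_pages(), so readers scanning vmlist (vread/vwrite) can
 * never observe a half-torn-down area.
 */
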
void vfree(void * addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	if ((PAGE_SIZE-1) & (unsigned long) addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
			write_unlock(&vmlist_lock);
			kfree(tmp);
			return;
		}
	}
	write_unlock(&vmlist_lock);
	printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
}

void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
{
	void * addr;
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages) {
		BUG();
		return NULL;
	}
	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		return NULL;
	addr = area->addr;
	if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask, prot)) {
		vfree(addr);
		return NULL;
	}
	return addr;
}

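/*
 * For context (not part of this file): in the 2.4 tree the familiar
 * entry point is a thin inline wrapper in include/linux/vmalloc.h,
 * along these lines:
 *
 *	static inline void * vmalloc (unsigned long size)
 *	{
 *		return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 *	}
 */
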
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		/* zero-fill the gap before this area */
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

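/*
 * vwrite() below mirrors vread() above, copying from the buffer into the
 * vmalloc'd areas and silently skipping (rather than filling) the gaps
 * between areas. In this era vread() is what /proc/kcore uses to read
 * vmalloc'd kernel memory.
 */
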
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		/* skip the gap before this area without writing anything */
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}