#include <linux/sched.h>
#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/swap.h>
#include <linux/rbtree.h>

extern unsigned long max_mapnr;
extern unsigned long num_physpages;
extern unsigned long num_mappedpages;
extern void * high_memory;
extern int page_cluster;
/* The inactive_clean lists are per zone. */
extern struct list_head active_list;
extern struct list_head inactive_list;

#include <asm/pgtable.h>
#include <asm/atomic.h>

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* The address space we belong to. */
	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next;

	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, listed below. */

	rb_node_t vm_rb;

	/*
	 * For areas with an address space and backing store,
	 * one of the address_space->i_mmap{,shared} lists,
	 * for shm areas, the list of attaches, otherwise unused.
	 */
	struct vm_area_struct *vm_next_share;
	struct vm_area_struct **vm_pprev_share;

	/* Function pointers to deal with this struct. */
	struct vm_operations_struct * vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units, *not* PAGE_CACHE_SIZE */
	struct file * vm_file;		/* File we map to (can be NULL). */
	unsigned long vm_raend;		/* XXX: put full readahead info here. */
	void * vm_private_data;		/* was vm_pte (shared mem) */
};

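/*
 * Example (editor's sketch, not part of the original header): walking the
 * sorted, singly linked VMA list above. The caller is assumed to hold
 * mm->mmap_sem so the list cannot change underneath it.
 */
static inline unsigned long example_count_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long n = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)	/* address order */
		n++;
	return n;
}
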
#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_GROWSUP	0x00000200
#define VM_SHM		0x00000400	/* shared memory area, don't swap out */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x00001000
#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_RESERVED	0x00080000	/* Don't unmap it from swap_out */

#define VM_STACK_FLAGS	0x00000177

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)

/* read ahead limits */
extern int vm_min_readahead;
extern int vm_max_readahead;

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */
extern pgprot_t protection_map[16];

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int unused);
};

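/*
 * Example (editor's sketch, not in the original header): a driver or
 * filesystem plugs into the VM by pointing vma->vm_ops at a table like the
 * hypothetical one below; generic_file_mmap() does this for files, with
 * filemap_nopage() as the nopage method. Kept under #if 0 because it is
 * illustrative only and uses names defined later in this header.
 */
#if 0
static struct page *example_nopage(struct vm_area_struct *area,
				   unsigned long address, int unused)
{
	/* Back every fault with a fresh zeroed page; a real driver would
	   look the page up in its own cache instead. */
	struct page *page = alloc_page(GFP_HIGHUSER);

	if (!page)
		return NOPAGE_OOM;
	clear_user_highpage(page, address);
	return page;
}

static struct vm_operations_struct example_vm_ops = {
	nopage:	example_nopage,		/* called on a not-present fault */
};
#endif
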
/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page.
 *
 * Try to keep the most commonly accessed fields in single cache lines
 * here (16 bytes or greater). This ordering should be particularly
 * beneficial on 32-bit processors.
 *
 * The first line is data used in page cache lookup, the second line
 * is used for linear searches (eg. clock algorithm scans).
 *
 * TODO: make this structure smaller, it could be as small as 32 bytes.
 */
typedef struct page {
	struct list_head list;		/* ->mapping has some page lists. */
	struct address_space *mapping;	/* The inode (or ...) we belong to. */
	unsigned long index;		/* Our offset within mapping. */
	struct page *next_hash;		/* Next page sharing our hash bucket in
					   the pagecache hash table. */
	atomic_t count;			/* Usage count, see below. */
	unsigned long flags;		/* atomic flags, some possibly
					   updated asynchronously */
	struct list_head lru;		/* Pageout list, eg. active_list;
					   protected by pagemap_lru_lock !! */
	struct page **pprev_hash;	/* Complement to *next_hash. */
	struct buffer_head * buffers;	/* Buffer maps us to a disk block. */

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
} mem_map_t;

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - disk mapping    (page->buffers)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */
#define get_page(p)		atomic_inc(&(p)->count)
#define put_page(p)		__free_page(p)
#define put_page_testzero(p)	atomic_dec_and_test(&(p)->count)
#define page_count(p)		atomic_read(&(p)->count)
#define set_page_count(p,v)	atomic_set(&(p)->count, v)

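/*
 * Example (editor's sketch): the usual pattern for these macros is to pin
 * a page before sleeping or dropping locks, and unpin it afterwards; the
 * page cannot be freed while the extra reference is held. Under #if 0
 * because put_page() expands to __free_page(), defined further down.
 */
#if 0
	get_page(page);			/* count: n -> n+1, page pinned */
	/* ... block, start I/O, or drop locks here ... */
	put_page(page);			/* count: n+1 -> n, freed at 0 */
#endif
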
/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped
 * out. Some of them might not even exist (eg empty_bad_page)...
 *
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page->count denotes a reference count.
 *   page->count == 0 means the page is free.
 *   page->count == 1 means the page is used for exactly one purpose
 *   (e.g. a private data page of one process).
 *
 * A page may be used for kmalloc() or anyone else who does a
 * __get_free_page(). In this case the page->count is at least 1, and
 * all other fields are unused but should be 0 or NULL. The
 * management of this page is the responsibility of the one who uses
 * it.
 *
 * The other pages (we may call them "process pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A page may belong to an inode's memory mapping. In this case,
 * page->mapping is the pointer to the inode, and page->index is the
 * file offset of the page, in units of PAGE_CACHE_SIZE.
 *
 * A page may have buffers allocated to it. In this case,
 * page->buffers is a circular list of these buffer heads. Else,
 * page->buffers == NULL.
 *
 * For pages belonging to inodes, the page->count is the number of
 * attaches, plus 1 if buffers are allocated to the page, plus one
 * for the page cache itself.
 *
 * All pages belonging to an inode are in these doubly linked lists:
 * mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages;
 * using the page->list list_head. These fields are also used for
 * freelist management (when page->count==0).
 *
 * There is also a hash table mapping (mapping,index) to the page
 * in memory if present. The lists for this hash table use the fields
 * page->next_hash and page->pprev_hash.
 *
 * All process pages can do I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written to disk,
 * - private pages which have been modified may need to be swapped out
 *   to swap space and (later) to be read back into memory.
 *
 * During disk I/O, PG_locked is used. This bit is set before I/O
 * and reset when I/O completes. page_waitqueue(page) is a wait queue of all
 * tasks waiting for the I/O on this page to complete.
 * PG_uptodate tells whether the page's contents are valid.
 * When a read completes, the page becomes uptodate, unless a disk I/O
 * error happened.
 *
 * For choosing which pages to swap out, inode pages carry a
 * PG_referenced bit, which is set any time the system accesses
 * that page through the (mapping,index) hash table. This referenced
 * bit, together with the referenced bit in the page tables, is used
 * to manipulate page->age and move the page across the active,
 * inactive_dirty and inactive_clean lists.
 *
 * Note that the referenced bit, the page->lru list_head and the
 * active, inactive_dirty and inactive_clean lists are protected by
 * the pagemap_lru_lock, and *NOT* by the usual PG_locked bit!
 *
 * PG_skip is used on sparc/sparc64 architectures to "skip" certain
 * parts of the address space.
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic
 * code guarantees that this bit is cleared for a page when it first
 * is entered into the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual
 * address space, they need to be kmapped separately for doing IO on
 * the pages. The struct page (which holds these flag bits) is always
 * mapped into kernel address space...
 */
#define PG_locked		 0	/* Page is locked. Don't touch. */
#define PG_error		 1
#define PG_referenced		 2
#define PG_uptodate		 3
#define PG_dirty		 4
#define PG_unused		 5
#define PG_lru			 6
#define PG_active		 7
#define PG_slab			 8
#define PG_skip			10
#define PG_highmem		11
#define PG_checked		12	/* kill me in 2.5.<early>. */
#define PG_arch_1		13
#define PG_reserved		14
#define PG_launder		15	/* written out by VM pressure.. */
#define PG_fs_1			16	/* Filesystem specific */

/* Make it prettier to test the above... */
#define UnlockPage(page)	unlock_page(page)
#define Page_Uptodate(page)	test_bit(PG_uptodate, &(page)->flags)
#define SetPageUptodate(page)	set_bit(PG_uptodate, &(page)->flags)
#define ClearPageUptodate(page)	clear_bit(PG_uptodate, &(page)->flags)
#define PageDirty(page)		test_bit(PG_dirty, &(page)->flags)
#define SetPageDirty(page)	set_bit(PG_dirty, &(page)->flags)
#define ClearPageDirty(page)	clear_bit(PG_dirty, &(page)->flags)
#define PageLocked(page)	test_bit(PG_locked, &(page)->flags)
#define LockPage(page)		set_bit(PG_locked, &(page)->flags)
#define TryLockPage(page)	test_and_set_bit(PG_locked, &(page)->flags)
#define PageChecked(page)	test_bit(PG_checked, &(page)->flags)
#define SetPageChecked(page)	set_bit(PG_checked, &(page)->flags)
#define PageLaunder(page)	test_bit(PG_launder, &(page)->flags)
#define SetPageLaunder(page)	set_bit(PG_launder, &(page)->flags)
#define ClearPageLaunder(page)	clear_bit(PG_launder, &(page)->flags)

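/*
 * Example (editor's sketch): the canonical non-blocking way to take the
 * page lock with the helpers above. Callers that may sleep use lock_page()
 * instead, which waits on page_waitqueue(page).
 */
#if 0
	if (TryLockPage(page))		/* non-zero: someone else holds it */
		return;			/* back off rather than spin */
	/* ... PG_locked held: safe to start I/O on the page ... */
	UnlockPage(page);		/* clears PG_locked, wakes waiters */
#endif
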
/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */
#define ZONE_SHIFT (BITS_PER_LONG - 8)

extern struct zone_struct *zone_table[];

static inline zone_t *page_zone(struct page *page)
{
	return zone_table[page->flags >> ZONE_SHIFT];
}

static inline void set_page_zone(struct page *page, unsigned long zone_num)
{
	page->flags &= ~(~0UL << ZONE_SHIFT);
	page->flags |= zone_num << ZONE_SHIFT;
}

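/*
 * Worked example (editor's note): on a 32-bit box ZONE_SHIFT is 24, so the
 * zone index lives in the top 8 bits of page->flags while the low 24 bits
 * keep their flag meaning:
 *
 *	flags == 0x00000045			(some PG_* bits set)
 *	set_page_zone(page, 2);			flags == 0x02000045
 *	page_zone(page);			zone_table[0x02000045 >> 24]
 *						== zone_table[2]
 */
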
/*
 * In order to avoid #ifdefs within C code itself, we define
 * set_page_address to a noop for non-highmem machines, where
 * the field isn't useful.
 * The same is true for page_address() in arch-dependent code.
 */
#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)

#define set_page_address(page, address)			\
	do {						\
		(page)->virtual = (address);		\
	} while(0)

#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
#define set_page_address(page, address)  do { } while(0)
#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */

/*
 * Permanent address of a page. Obviously must never be
 * called on a highmem page.
 */
#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)

#define page_address(page) ((page)->virtual)

#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */

#define page_address(page)						\
	__va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT)	\
			+ page_zone(page)->zone_start_paddr)

#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */

extern void FASTCALL(set_page_dirty(struct page *));

/*
 * The first mb is necessary to safely close the critical section opened by the
 * TryLockPage(), the second mb is necessary to enforce ordering between
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page).
 */
#define PageError(page)		test_bit(PG_error, &(page)->flags)
#define SetPageError(page)	set_bit(PG_error, &(page)->flags)
#define ClearPageError(page)	clear_bit(PG_error, &(page)->flags)
#define PageReferenced(page)	test_bit(PG_referenced, &(page)->flags)
#define SetPageReferenced(page)	set_bit(PG_referenced, &(page)->flags)
#define ClearPageReferenced(page)	clear_bit(PG_referenced, &(page)->flags)
#define PageTestandClearReferenced(page)	test_and_clear_bit(PG_referenced, &(page)->flags)
#define PageSlab(page)		test_bit(PG_slab, &(page)->flags)
#define PageSetSlab(page)	set_bit(PG_slab, &(page)->flags)
#define PageClearSlab(page)	clear_bit(PG_slab, &(page)->flags)
#define PageReserved(page)	test_bit(PG_reserved, &(page)->flags)

#define PageActive(page)	test_bit(PG_active, &(page)->flags)
#define SetPageActive(page)	set_bit(PG_active, &(page)->flags)
#define ClearPageActive(page)	clear_bit(PG_active, &(page)->flags)

#define PageLRU(page)		test_bit(PG_lru, &(page)->flags)
#define TestSetPageLRU(page)	test_and_set_bit(PG_lru, &(page)->flags)
#define TestClearPageLRU(page)	test_and_clear_bit(PG_lru, &(page)->flags)

#ifdef CONFIG_HIGHMEM
#define PageHighMem(page)	test_bit(PG_highmem, &(page)->flags)
#else
#define PageHighMem(page)	0 /* needed to optimize away at compile time */
#endif

#define SetPageReserved(page)	set_bit(PG_reserved, &(page)->flags)
#define ClearPageReserved(page)	clear_bit(PG_reserved, &(page)->flags)

/*
 * Error return values for the *_nopage functions
 */
#define NOPAGE_SIGBUS	(NULL)
#define NOPAGE_OOM	((struct page *) (-1))

/* The array of struct pages */
extern mem_map_t * mem_map;

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages, the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */
extern struct page * FASTCALL(_alloc_pages(unsigned int gfp_mask, unsigned int order));
extern struct page * FASTCALL(__alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist));
extern struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order);

static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order)
{
	/*
	 * Gets optimized away by the compiler.
	 */
	if (order >= MAX_ORDER)
		return NULL;
	return _alloc_pages(gfp_mask, order);
}

#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

extern unsigned long FASTCALL(__get_free_pages(unsigned int gfp_mask, unsigned int order));
extern unsigned long FASTCALL(get_zeroed_page(unsigned int gfp_mask));

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask),0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA,(order))

/*
 * The old interface name will be removed in 2.5:
 */
#define get_free_page get_zeroed_page

/*
 * There is only one 'core' page-freeing function.
 */
extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)

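/*
 * Example (editor's sketch): the two allocator namespaces side by side.
 * Both requests below are order-0 (a single page); only the return type
 * and the legal zones differ. Under #if 0, illustrative only.
 */
#if 0
	struct page *page = alloc_page(GFP_HIGHUSER);	  /* may be highmem */
	unsigned long addr = __get_free_page(GFP_KERNEL); /* always mapped */

	if (page)
		__free_page(page);	/* free by struct page */
	if (addr)
		free_page(addr);	/* free by virtual address */
#endif
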
extern void show_free_areas(void);
extern void show_free_areas_node(pg_data_t *pgdat);

extern void clear_page_tables(struct mm_struct *, unsigned long, int);

extern int fail_writepage(struct page *);
struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused);
struct file *shmem_file_setup(char * name, loff_t size);
extern void shmem_lock(struct file * file, int lock);
extern int shmem_zero_setup(struct vm_area_struct *);

extern void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);

extern int vmtruncate(struct inode * inode, loff_t offset);
extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
extern pte_t *FASTCALL(pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access);
extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char * src, unsigned long dst, int len);
extern int ptrace_attach(struct task_struct *tsk);
extern int ptrace_detach(struct task_struct *, unsigned int);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, int kill);

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);

/*
 * On a two-level page table, this ends up being trivial. Thus the
 * inlining and the symmetry break with pte_alloc() that does all
 * of this out-of-line.
 */
static inline pmd_t *pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	if (pgd_none(*pgd))
		return __pmd_alloc(mm, pgd, address);
	return pmd_offset(pgd, address);
}

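/*
 * Example (editor's sketch, not part of the original header): how
 * pmd_alloc() and pte_alloc() compose into the classical top-down walk
 * used by fault handlers. The caller is assumed to hold
 * mm->page_table_lock.
 */
static inline pte_t *example_walk_page_table(struct mm_struct *mm,
					     unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);	/* top level always exists */
	pmd_t *pmd = pmd_alloc(mm, pgd, address);

	if (!pmd)
		return NULL;			/* out of memory */
	return pte_alloc(mm, pmd, address);	/* may also return NULL */
}
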
extern int pgt_cache_water[2];
extern int check_pgt_cache(void);

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
	unsigned long * zones_size, unsigned long zone_start_paddr,
	unsigned long *zholes_size);
extern void mem_init(void);
extern void show_mem(void);
extern void si_meminfo(struct sysinfo * val);
extern void swapin_readahead(swp_entry_t);

extern struct address_space swapper_space;
#define PageSwapCache(page) ((page)->mapping == &swapper_space)

static inline int is_page_cache_freeable(struct page * page)
{
	return page_count(page) - !!page->buffers == 1;
}

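/*
 * Worked example (editor's note): a clean page-cache page with no buffers
 * has count == 1 (the page cache's own reference), so the test above
 * yields 1 - 0 == 1: freeable. With buffers attached the count is 2 and
 * 2 - !!buffers == 1 still holds; the !!page->buffers term simply cancels
 * the reference that the buffers hold.
 */
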
extern int can_share_swap_page(struct page *);
extern int remove_exclusive_swap_page(struct page *);

extern void __free_pte(pte_t);

extern void lock_vma_mappings(struct vm_area_struct *);
extern void unlock_vma_mappings(struct vm_area_struct *);
extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void build_mmap_rb(struct mm_struct *);
extern void exit_mmap(struct mm_struct *);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff);

static inline unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	unsigned long ret = -EINVAL;
	if ((offset + PAGE_ALIGN(len)) < offset)
		goto out;
	if (!(offset & ~PAGE_MASK))
		ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
out:
	return ret;
}

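/*
 * Example (editor's sketch): creating an anonymous read/write mapping the
 * way sys_mmap-style code would; the caller must hold mm->mmap_sem for
 * writing. Under #if 0, illustrative only.
 */
#if 0
	down_write(&current->mm->mmap_sem);
	addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, 0);
	up_write(&current->mm->mmap_sem);
#endif
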
extern int do_munmap(struct mm_struct *, unsigned long, size_t);

extern unsigned long do_brk(unsigned long, unsigned long);

static inline void __vma_unlink(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev)
{
	prev->vm_next = vma->vm_next;
	rb_erase(&vma->vm_rb, &mm->mm_rb);
	if (mm->mmap_cache == vma)
		mm->mmap_cache = prev;
}

static inline int can_vma_merge(struct vm_area_struct * vma, unsigned long vm_flags)
{
	if (!vma->vm_file && vma->vm_flags == vm_flags)
		return 1;
	else
		return 0;
}

extern void remove_inode_page(struct page *);
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);

/* generic vm_area_ops exported for stackable file systems */
extern int filemap_sync(struct vm_area_struct *, unsigned long, size_t, unsigned int);
extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);

/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low four bits) */
#define __GFP_DMA	0x01
#define __GFP_HIGHMEM	0x02

/* Action modifiers - don't change the zoning */
#define __GFP_WAIT	0x10	/* Can wait and reschedule? */
#define __GFP_HIGH	0x20	/* Should access emergency pools? */
#define __GFP_IO	0x40	/* Can start low memory physical IO? */
#define __GFP_HIGHIO	0x80	/* Can start high mem physical IO? */
#define __GFP_FS	0x100	/* Can call down to low-level FS? */

#define GFP_NOHIGHIO	(__GFP_HIGH | __GFP_WAIT | __GFP_IO)
#define GFP_NOIO	(__GFP_HIGH | __GFP_WAIT)
#define GFP_NOFS	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO)
#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_USER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_HIGHUSER	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
#define GFP_KERNEL	(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_NFS		(__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
#define GFP_KSWAPD	(             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)

/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA		__GFP_DMA

static inline unsigned int pf_gfp_mask(unsigned int gfp_mask)
{
	/* avoid all memory balancing I/O methods if this task cannot block on I/O */
	if (current->flags & PF_NOIO)
		gfp_mask &= ~(__GFP_IO | __GFP_HIGHIO | __GFP_FS);

	return gfp_mask;
}

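/*
 * Example (editor's note): allocation sites filter their mask through
 * pf_gfp_mask() so a PF_NOIO task (e.g. one writing pages out through the
 * loop driver) quietly degrades GFP_KERNEL to a no-I/O allocation instead
 * of recursing into the I/O path:
 *
 *	page = alloc_page(pf_gfp_mask(GFP_KERNEL));
 */
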
/* vma is the first one with address < vma->vm_end,
 * and even address < vma->vm_start. Have to extend vma. */
static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
{
	unsigned long grow;

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller is required
	 * to hold the mmap_sem in write mode. We need to get the spinlock only
	 * before relocating the vma range ourself.
	 */
	address &= PAGE_MASK;
	spin_lock(&vma->vm_mm->page_table_lock);
	grow = (vma->vm_start - address) >> PAGE_SHIFT;
	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
	    ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) {
		spin_unlock(&vma->vm_mm->page_table_lock);
		return -ENOMEM;
	}
	vma->vm_start = address;
	vma->vm_pgoff -= grow;
	vma->vm_mm->total_vm += grow;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow;
	spin_unlock(&vma->vm_mm->page_table_lock);
	return 0;
}

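/*
 * Example (editor's sketch): the arch page fault path uses expand_stack()
 * roughly like this when the faulting address falls below a VM_GROWSDOWN
 * vma (cf. do_page_fault() in the arch code):
 *
 *	vma = find_vma(mm, address);
 *	if (vma && address < vma->vm_start &&
 *	    (vma->vm_flags & VM_GROWSDOWN) &&
 *	    expand_stack(vma, address) == 0)
 *		... proceed to handle_mm_fault() ...
 */
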
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none. Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);

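/*
 * Example (editor's sketch, not part of the original header): typical use
 * of the lookups above. Because find_vma() only guarantees addr < vm_end,
 * the caller still checks vm_start to tell "inside the vma" from "in a
 * hole below it".
 */
static inline int example_addr_is_mapped(struct mm_struct *mm,
					 unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	return vma && vma->vm_start <= addr;	/* 1 only if addr is inside */
}
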
extern struct page * vmalloc_to_page(void *addr);

#endif /* __KERNEL__ */