/*
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/locks.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/swapctl.h>
#include <linux/init.h>
#include <linux/iobuf.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>

#include <linux/highmem.h>
/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */
atomic_t page_cache_size = ATOMIC_INIT(0);
unsigned int page_hash_bits;
struct page **page_hash_table;

int vm_max_readahead = 31;
int vm_min_readahead = 3;
EXPORT_SYMBOL(vm_max_readahead);
EXPORT_SYMBOL(vm_min_readahead);
spinlock_cacheline_t pagecache_lock_cacheline = {SPIN_LOCK_UNLOCKED};
/*
 * NOTE: to avoid deadlocking you must never acquire the pagemap_lru_lock
 * with the pagecache_lock held.
 */
spinlock_cacheline_t pagemap_lru_lock_cacheline = {SPIN_LOCK_UNLOCKED};
#define CLUSTER_PAGES		(1 << page_cluster)
#define CLUSTER_OFFSET(x)	(((x) >> page_cluster) << page_cluster)
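/*
 * Illustrative only (not in the original file): with page_cluster = 4
 * and 4K pages, CLUSTER_PAGES is 16, so a cluster spans 64K and e.g.
 * CLUSTER_OFFSET(21) == 16 -- the offset rounded down to its cluster.
 */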
static void FASTCALL(add_page_to_hash_queue(struct page * page, struct page **p));
static void add_page_to_hash_queue(struct page * page, struct page **p)
{
	struct page *next = *p;

	*p = page;
	page->next_hash = next;
	page->pprev_hash = p;
	if (next)
		next->pprev_hash = &page->next_hash;
	atomic_inc(&page_cache_size);
}

static inline void add_page_to_inode_queue(struct address_space *mapping, struct page * page)
{
	struct list_head *head = &mapping->clean_pages;

	mapping->nrpages++;
	list_add(&page->list, head);
	page->mapping = mapping;
}

static inline void remove_page_from_inode_queue(struct page * page)
{
	struct address_space * mapping = page->mapping;

	if (mapping->a_ops->removepage)
		mapping->a_ops->removepage(page);
	list_del(&page->list);
	page->mapping = NULL;
	mapping->nrpages--;
}

static inline void remove_page_from_hash_queue(struct page * page)
{
	struct page *next = page->next_hash;
	struct page **pprev = page->pprev_hash;

	if (next)
		next->pprev_hash = pprev;
	*pprev = next;
	page->pprev_hash = NULL;
	atomic_dec(&page_cache_size);
}

/*
 * Remove a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.
 */
void __remove_inode_page(struct page *page)
{
	if (PageDirty(page) && !PageSwapCache(page))
		BUG();
	remove_page_from_inode_queue(page);
	remove_page_from_hash_queue(page);
}

void remove_inode_page(struct page *page)
{
	if (!PageLocked(page))
		PAGE_BUG(page);

	spin_lock(&pagecache_lock);
	__remove_inode_page(page);
	spin_unlock(&pagecache_lock);
}

static inline int sync_page(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
		return mapping->a_ops->sync_page(page);
	return 0;
}

/*
 * Add a page to the dirty page list.
 */
void set_page_dirty(struct page *page)
{
	if (!test_and_set_bit(PG_dirty, &page->flags)) {
		struct address_space *mapping = page->mapping;

		if (mapping) {
			spin_lock(&pagecache_lock);
			mapping = page->mapping;
			if (mapping) {	/* may have been truncated */
				list_del(&page->list);
				list_add(&page->list, &mapping->dirty_pages);
			}
			spin_unlock(&pagecache_lock);

			if (mapping && mapping->host)
				mark_inode_dirty_pages(mapping->host);
		}
	}
}
/**
 * invalidate_inode_pages - Invalidate all the unlocked pages of one inode
 * @inode: the inode whose pages we want to invalidate
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 */
void invalidate_inode_pages(struct inode * inode)
{
	struct list_head *head, *curr;
	struct page * page;

	head = &inode->i_mapping->clean_pages;

	spin_lock(&pagemap_lru_lock);
	spin_lock(&pagecache_lock);
	curr = head->next;

	while (curr != head) {
		page = list_entry(curr, struct page, list);
		curr = curr->next;

		/* We cannot invalidate something in dirty.. */
		if (PageDirty(page))
			continue;
		/* ..or locked */
		if (TryLockPage(page))
			continue;
		if (page->buffers && !try_to_free_buffers(page, 0))
			goto unlock;
		if (page_count(page) != 1)
			goto unlock;

		__lru_cache_del(page);
		__remove_inode_page(page);
		UnlockPage(page);
		page_cache_release(page);
		continue;
unlock:
		UnlockPage(page);
	}

	spin_unlock(&pagecache_lock);
	spin_unlock(&pagemap_lru_lock);
}

static int do_flushpage(struct page *page, unsigned long offset)
{
	int (*flushpage) (struct page *, unsigned long);
	flushpage = page->mapping->a_ops->flushpage;
	if (flushpage)
		return (*flushpage)(page, offset);
	return block_flushpage(page, offset);
}

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
	if (page->buffers)
		do_flushpage(page, partial);
}

static void truncate_complete_page(struct page *page)
{
	/* Leave it on the LRU if it gets converted into anonymous buffers */
	if (!page->buffers || do_flushpage(page, 0))
		lru_cache_del(page);

	/*
	 * We remove the page from the page cache _after_ we have
	 * destroyed all buffer-cache references to it. Otherwise some
	 * other process might think this inode page is not in the
	 * page cache and creates a buffer-cache alias to it causing
	 * all sorts of fun problems ...
	 */
	ClearPageDirty(page);
	ClearPageUptodate(page);
	remove_inode_page(page);
	page_cache_release(page);
}
static int FASTCALL(truncate_list_pages(struct list_head *, unsigned long, unsigned *));
static int truncate_list_pages(struct list_head *head, unsigned long start, unsigned *partial)
{
	struct list_head *curr;
	struct page * page;
	int unlocked = 0;

 restart:
	curr = head->prev;
	while (curr != head) {
		unsigned long offset;

		page = list_entry(curr, struct page, list);
		offset = page->index;

		/* Is one of the pages to truncate? */
		if ((offset >= start) || (*partial && (offset + 1) == start)) {
			int failed;

			page_cache_get(page);
			failed = TryLockPage(page);

			list_del(head);
			if (!failed)
				/* Restart after this page */
				list_add_tail(head, curr);
			else
				/* Restart on this page */
				list_add(head, curr);

			spin_unlock(&pagecache_lock);
			unlocked = 1;

			if (!failed) {
				if (*partial && (offset + 1) == start) {
					truncate_partial_page(page, *partial);
					*partial = 0;
				} else
					truncate_complete_page(page);
				UnlockPage(page);
			} else
				wait_on_page(page);

			page_cache_release(page);

			if (current->need_resched) {
				__set_current_state(TASK_RUNNING);
				schedule();
			}

			spin_lock(&pagecache_lock);
			goto restart;
		}
		curr = curr->prev;
	}
	return unlocked;
}
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Truncate the page cache at a set offset, removing the pages
 * that are beyond that offset (and zeroing out partial pages).
 * If any page is locked we wait for it to become unlocked.
 */
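/*
 * Illustrative only (not in the original file): with 4K pages, truncating
 * to lstart = 10000 gives start = (10000 + 4095) >> 12 = 3 and
 * partial = 10000 & 4095 = 1808, so pages with index >= 3 are dropped
 * and page 2 is zeroed from byte 1808 onwards.
 */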
void truncate_inode_pages(struct address_space * mapping, loff_t lstart)
{
	unsigned long start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	int unlocked;

	spin_lock(&pagecache_lock);
	do {
		unlocked = truncate_list_pages(&mapping->clean_pages, start, &partial);
		unlocked |= truncate_list_pages(&mapping->dirty_pages, start, &partial);
		unlocked |= truncate_list_pages(&mapping->locked_pages, start, &partial);
	} while (unlocked);
	/* Traversed all three lists without dropping the lock */
	spin_unlock(&pagecache_lock);
}

static inline int invalidate_this_page2(struct page * page,
					struct list_head * curr,
					struct list_head * head)
{
	int unlocked = 1;

	/*
	 * The page is locked and we hold the pagecache_lock as well
	 * so both page_count(page) and page->buffers stay constant here.
	 */
	if (page_count(page) == 1 + !!page->buffers) {
		/* Restart after this page */
		list_del(head);
		list_add_tail(head, curr);

		page_cache_get(page);
		spin_unlock(&pagecache_lock);
		truncate_complete_page(page);
	} else {
		if (page->buffers) {
			/* Restart after this page */
			list_del(head);
			list_add_tail(head, curr);

			page_cache_get(page);
			spin_unlock(&pagecache_lock);
			block_invalidate_page(page);
		} else
			unlocked = 0;

		ClearPageDirty(page);
		ClearPageUptodate(page);
	}

	return unlocked;
}
static int FASTCALL(invalidate_list_pages2(struct list_head *));
static int invalidate_list_pages2(struct list_head *head)
{
	struct list_head *curr;
	struct page * page;
	int unlocked = 0;

 restart:
	curr = head->prev;
	while (curr != head) {
		page = list_entry(curr, struct page, list);

		if (!TryLockPage(page)) {
			int __unlocked;

			__unlocked = invalidate_this_page2(page, curr, head);
			UnlockPage(page);
			unlocked |= __unlocked;
			if (!__unlocked) {
				curr = curr->prev;
				continue;
			}
		} else {
			/* Restart on this page */
			list_del(head);
			list_add(head, curr);

			page_cache_get(page);
			spin_unlock(&pagecache_lock);
			unlocked = 1;
			wait_on_page(page);
		}

		page_cache_release(page);
		if (current->need_resched) {
			__set_current_state(TASK_RUNNING);
			schedule();
		}

		spin_lock(&pagecache_lock);
		goto restart;
	}
	return unlocked;
}
/**
 * invalidate_inode_pages2 - Clear all the dirty bits around if it can't
 * free the pages because they're mapped.
 * @mapping: the address_space whose pages we want to invalidate
 */
void invalidate_inode_pages2(struct address_space * mapping)
{
	int unlocked;

	spin_lock(&pagecache_lock);
	do {
		unlocked = invalidate_list_pages2(&mapping->clean_pages);
		unlocked |= invalidate_list_pages2(&mapping->dirty_pages);
		unlocked |= invalidate_list_pages2(&mapping->locked_pages);
	} while (unlocked);
	spin_unlock(&pagecache_lock);
}

static inline struct page * __find_page_nolock(struct address_space *mapping, unsigned long offset, struct page *page)
{
	while (page) {
		if (page->mapping == mapping && page->index == offset)
			break;
		page = page->next_hash;
	}
	return page;
}

static int do_buffer_fdatasync(struct list_head *head, unsigned long start, unsigned long end, int (*fn)(struct page *))
{
	struct list_head *curr;
	struct page *page;
	int retval = 0;

	spin_lock(&pagecache_lock);
	curr = head->next;
	while (curr != head) {
		page = list_entry(curr, struct page, list);
		curr = curr->next;
		if (page->index >= end)
			continue;
		if (page->index < start)
			continue;

		page_cache_get(page);
		spin_unlock(&pagecache_lock);
		lock_page(page);

		/* The buffers could have been freed while we waited for the page lock */
		if (page->buffers)
			retval |= fn(page);

		UnlockPage(page);
		spin_lock(&pagecache_lock);
		curr = page->list.next;
		page_cache_release(page);
	}
	spin_unlock(&pagecache_lock);

	return retval;
}
/*
 * Two-stage data sync: first start the IO, then go back and
 * collect the information..
 */
int generic_buffer_fdatasync(struct inode *inode, unsigned long start_idx, unsigned long end_idx)
{
	int retval;

	/* writeout dirty buffers on pages from both clean and dirty lists */
	retval = do_buffer_fdatasync(&inode->i_mapping->dirty_pages, start_idx, end_idx, writeout_one_page);
	retval |= do_buffer_fdatasync(&inode->i_mapping->clean_pages, start_idx, end_idx, writeout_one_page);
	retval |= do_buffer_fdatasync(&inode->i_mapping->locked_pages, start_idx, end_idx, writeout_one_page);

	/* now wait for locked buffers on pages from both clean and dirty lists */
	retval |= do_buffer_fdatasync(&inode->i_mapping->dirty_pages, start_idx, end_idx, waitfor_one_page);
	retval |= do_buffer_fdatasync(&inode->i_mapping->clean_pages, start_idx, end_idx, waitfor_one_page);
	retval |= do_buffer_fdatasync(&inode->i_mapping->locked_pages, start_idx, end_idx, waitfor_one_page);

	return retval;
}

/*
 * In-memory filesystems have to fail their
 * writepage function - and this has to be
 * worked around in the VM layer..
 *
 * We
 *  - mark the page dirty again (but do NOT
 *    add it back to the inode dirty list, as
 *    that would livelock in fdatasync)
 *  - activate the page so that the page stealer
 *    doesn't try to write it out over and over
 *    again.
 */
int fail_writepage(struct page *page)
{
	/* Only activate on memory-pressure, not fsync.. */
	if (PageLaunder(page)) {
		activate_page(page);
		SetPageReferenced(page);
	}

	/* Set the page dirty again, unlock */
	SetPageDirty(page);
	UnlockPage(page);
	return 0;
}

EXPORT_SYMBOL(fail_writepage);
/**
 * filemap_fdatasync - walk the list of dirty pages of the given address
 * space and writepage() all of them.
 * @mapping: address space structure to write
 */
int filemap_fdatasync(struct address_space * mapping)
{
	int ret = 0;
	int (*writepage)(struct page *) = mapping->a_ops->writepage;

	spin_lock(&pagecache_lock);

	while (!list_empty(&mapping->dirty_pages)) {
		struct page *page = list_entry(mapping->dirty_pages.prev, struct page, list);

		list_del(&page->list);
		list_add(&page->list, &mapping->locked_pages);

		if (!PageDirty(page))
			continue;

		page_cache_get(page);
		spin_unlock(&pagecache_lock);

		lock_page(page);

		if (PageDirty(page)) {
			int err;
			ClearPageDirty(page);
			err = writepage(page);
			if (err && !ret)
				ret = err;
		} else
			UnlockPage(page);

		page_cache_release(page);
		spin_lock(&pagecache_lock);
	}
	spin_unlock(&pagecache_lock);
	return ret;
}
/**
 * filemap_fdatawait - walk the list of locked pages of the given address
 * space and wait for all of them.
 * @mapping: address space structure to wait for
 */
int filemap_fdatawait(struct address_space * mapping)
{
	int ret = 0;

	spin_lock(&pagecache_lock);

	while (!list_empty(&mapping->locked_pages)) {
		struct page *page = list_entry(mapping->locked_pages.next, struct page, list);

		list_del(&page->list);
		list_add(&page->list, &mapping->clean_pages);

		if (!PageLocked(page))
			continue;

		page_cache_get(page);
		spin_unlock(&pagecache_lock);

		___wait_on_page(page);
		if (PageError(page))
			ret = -EIO;

		page_cache_release(page);
		spin_lock(&pagecache_lock);
	}
	spin_unlock(&pagecache_lock);
	return ret;
}
/*
 * Add a page to the inode page cache.
 *
 * The caller must have locked the page and
 * set all the page flags correctly..
 */
void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index)
{
	if (!PageLocked(page))
		BUG();

	page->index = index;
	page_cache_get(page);
	spin_lock(&pagecache_lock);
	add_page_to_inode_queue(mapping, page);
	add_page_to_hash_queue(page, page_hash(mapping, index));
	spin_unlock(&pagecache_lock);
	lru_cache_add(page);
}

/*
 * This adds a page to the page cache, starting out as locked,
 * owned by us, but unreferenced, not uptodate and with no errors.
 */
static inline void __add_to_page_cache(struct page * page,
	struct address_space *mapping, unsigned long offset,
	struct page **hash)
{
	unsigned long flags;

	flags = page->flags & ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_dirty | 1 << PG_referenced | 1 << PG_arch_1 | 1 << PG_checked);
	page->flags = flags | (1 << PG_locked);
	page_cache_get(page);
	page->index = offset;
	add_page_to_inode_queue(mapping, page);
	add_page_to_hash_queue(page, hash);
}

void add_to_page_cache(struct page * page, struct address_space * mapping, unsigned long offset)
{
	spin_lock(&pagecache_lock);
	__add_to_page_cache(page, mapping, offset, page_hash(mapping, offset));
	spin_unlock(&pagecache_lock);
	lru_cache_add(page);
}

int add_to_page_cache_unique(struct page * page,
	struct address_space *mapping, unsigned long offset,
	struct page **hash)
{
	int err;
	struct page *alias;

	spin_lock(&pagecache_lock);
	alias = __find_page_nolock(mapping, offset, *hash);

	err = 1;
	if (!alias) {
		__add_to_page_cache(page, mapping, offset, hash);
		err = 0;
	}

	spin_unlock(&pagecache_lock);
	if (!err)
		lru_cache_add(page);
	return err;
}
/*
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
static int page_cache_read(struct file * file, unsigned long offset)
{
	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
	struct page **hash = page_hash(mapping, offset);
	struct page *page;

	spin_lock(&pagecache_lock);
	page = __find_page_nolock(mapping, offset, *hash);
	spin_unlock(&pagecache_lock);
	if (page)
		return 0;

	page = page_cache_alloc(mapping);
	if (!page)
		return -ENOMEM;

	if (!add_to_page_cache_unique(page, mapping, offset, hash)) {
		int error = mapping->a_ops->readpage(file, page);
		page_cache_release(page);
		return error;
	}
	/*
	 * We arrive here in the unlikely event that someone
	 * raced with us and added our page to the cache first.
	 */
	page_cache_release(page);
	return 0;
}

/*
 * Read in an entire cluster at once.  A cluster is usually a 64k-
 * aligned block that includes the page requested in "offset."
 */
static int FASTCALL(read_cluster_nonblocking(struct file * file, unsigned long offset,
					     unsigned long filesize));
static int read_cluster_nonblocking(struct file * file, unsigned long offset,
				    unsigned long filesize)
{
	unsigned long pages = CLUSTER_PAGES;

	offset = CLUSTER_OFFSET(offset);
	while ((pages-- > 0) && (offset < filesize)) {
		int error = page_cache_read(file, offset);
		if (error < 0)
			return error;
		offset++;
	}

	return 0;
}
/*
 * Knuth recommends primes in approximately golden ratio to the maximum
 * integer representable by a machine word for multiplicative hashing.
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * These primes are chosen to be bit-sparse, that is, operations on
 * them can use shifts and additions instead of multiplications for
 * machines where multiplications are slow.
 */
#if BITS_PER_LONG == 32
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME 0x9e370001UL
#elif BITS_PER_LONG == 64
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
#else
#error Define GOLDEN_RATIO_PRIME for your wordsize.
#endif
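/*
 * Illustrative only (not in the original file): because the prime is
 * bit-sparse, the 32-bit multiply decomposes exactly as the comment
 * above spells out:
 *
 *	hash * 0x9e370001UL ==
 *		(hash << 31) + (hash << 29) - (hash << 25) +
 *		(hash << 22) - (hash << 19) - (hash << 16) + hash
 */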
/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static inline wait_queue_head_t *page_waitqueue(struct page *page)
{
	const zone_t *zone = page_zone(page);
	wait_queue_head_t *wait = zone->wait_table;
	unsigned long hash = (unsigned long)page;

#if BITS_PER_LONG == 64
	/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
	unsigned long n = hash;
	n <<= 18;
	hash -= n;
	n <<= 33;
	hash -= n;
	n <<= 3;
	hash += n;
	n <<= 3;
	hash -= n;
	n <<= 4;
	hash += n;
	n <<= 2;
	hash += n;
#else
	/* On some cpus multiply is faster, on others gcc will do shifts */
	hash *= GOLDEN_RATIO_PRIME;
#endif
	hash >>= zone->wait_table_shift;

	return &wait[hash];
}
/*
 * Wait for a page to get unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 *
 * The waiting strategy is to get on a waitqueue determined
 * by hashing. Waiters will then collide, and the newly woken
 * task must then determine whether it was woken for the page
 * it really wanted, and go back to sleep on the waitqueue if
 * that wasn't it. With the waitqueue semantics, a task never
 * leaves the waitqueue until it is woken, so the loop moves
 * forward one iteration every time there is
 * (1) a collision, or
 * (2) one of the colliding pages is woken.
 *
 * This is the thundering herd problem, but it is expected to
 * be very rare due to the few pages that are actually being
 * waited on at any given time and the quality of the hash function.
 */
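/*
 * Illustrative only (not in the original file): the canonical caller-side
 * pattern around these hashed waitqueues, cf. lock_page() further down:
 *
 *	page_cache_get(page);		pin the page across the wait
 *	if (TryLockPage(page))		fast path: lock was uncontended
 *		__lock_page(page);	slow path: sleep on the hashed queue
 *	...
 *	UnlockPage(page);		wakes every waiter that hashed here
 */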
void ___wait_on_page(struct page *page)
{
	wait_queue_head_t *waitqueue = page_waitqueue(page);
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	add_wait_queue(waitqueue, &wait);
	do {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!PageLocked(page))
			break;
		sync_page(page);
		schedule();
	} while (PageLocked(page));
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(waitqueue, &wait);
}

/*
 * unlock_page() is the other half of the story just above
 * __wait_on_page(). Here a couple of quick checks are done
 * and a couple of flags are set on the page, and then all
 * of the waiters for all of the pages in the appropriate
 * wait queue are woken.
 */
void unlock_page(struct page *page)
{
	wait_queue_head_t *waitqueue = page_waitqueue(page);
	ClearPageLaunder(page);
	smp_mb__before_clear_bit();
	if (!test_and_clear_bit(PG_locked, &(page)->flags))
		BUG();
	smp_mb__after_clear_bit();

	/*
	 * Although the default semantics of wake_up() are
	 * to wake all, here the specific function is used
	 * to make it even more explicit that a number of
	 * pages are being waited on here.
	 */
	if (waitqueue_active(waitqueue))
		wake_up_all(waitqueue);
}
/*
 * Get a lock on the page, assuming we need to sleep
 * to get it..
 */
static void __lock_page(struct page *page)
{
	wait_queue_head_t *waitqueue = page_waitqueue(page);
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	add_wait_queue_exclusive(waitqueue, &wait);
	for (;;) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (PageLocked(page)) {
			sync_page(page);
			schedule();
		}
		if (!TryLockPage(page))
			break;
	}
	__set_task_state(tsk, TASK_RUNNING);
	remove_wait_queue(waitqueue, &wait);
}

/*
 * Get an exclusive lock on the page, optimistically
 * assuming it's not locked..
 */
void lock_page(struct page *page)
{
	if (TryLockPage(page))
		__lock_page(page);
}
/*
 * a rather lightweight function, finding and getting a reference to a
 * hashed page atomically.
 */
struct page * __find_get_page(struct address_space *mapping,
			      unsigned long offset, struct page **hash)
{
	struct page *page;

	/*
	 * We scan the hash list read-only. Addition to and removal from
	 * the hash-list needs a held write-lock.
	 */
	spin_lock(&pagecache_lock);
	page = __find_page_nolock(mapping, offset, *hash);
	if (page)
		page_cache_get(page);
	spin_unlock(&pagecache_lock);
	return page;
}

/*
 * Same as above, but trylock it instead of incrementing the count.
 */
struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
{
	struct page *page;
	struct page **hash = page_hash(mapping, offset);

	spin_lock(&pagecache_lock);
	page = __find_page_nolock(mapping, offset, *hash);
	if (page) {
		if (TryLockPage(page))
			page = NULL;
	}
	spin_unlock(&pagecache_lock);
	return page;
}
/*
 * Must be called with the pagecache lock held,
 * will return with it held (but it may be dropped
 * during blocking operations).
 */
static struct page * FASTCALL(__find_lock_page_helper(struct address_space *, unsigned long, struct page *));
static struct page * __find_lock_page_helper(struct address_space *mapping,
					unsigned long offset, struct page *hash)
{
	struct page *page;

	/*
	 * We scan the hash list read-only. Addition to and removal from
	 * the hash-list needs a held write-lock.
	 */
repeat:
	page = __find_page_nolock(mapping, offset, hash);
	if (page) {
		page_cache_get(page);
		if (TryLockPage(page)) {
			spin_unlock(&pagecache_lock);
			lock_page(page);
			spin_lock(&pagecache_lock);

			/* Has the page been re-allocated while we slept? */
			if (page->mapping != mapping || page->index != offset) {
				UnlockPage(page);
				page_cache_release(page);
				goto repeat;
			}
		}
	}
	return page;
}

/*
 * Same as the above, but lock the page too, verifying that
 * it's still valid once we own it.
 */
struct page * __find_lock_page (struct address_space *mapping,
				unsigned long offset, struct page **hash)
{
	struct page *page;

	spin_lock(&pagecache_lock);
	page = __find_lock_page_helper(mapping, offset, *hash);
	spin_unlock(&pagecache_lock);
	return page;
}
/*
 * Same as above, but create the page if required..
 */
struct page * find_or_create_page(struct address_space *mapping, unsigned long index, unsigned int gfp_mask)
{
	struct page *page;
	struct page **hash = page_hash(mapping, index);

	spin_lock(&pagecache_lock);
	page = __find_lock_page_helper(mapping, index, *hash);
	spin_unlock(&pagecache_lock);
	if (!page) {
		struct page *newpage = alloc_page(gfp_mask);
		if (newpage) {
			spin_lock(&pagecache_lock);
			page = __find_lock_page_helper(mapping, index, *hash);
			if (likely(!page)) {
				page = newpage;
				__add_to_page_cache(page, mapping, index, hash);
				newpage = NULL;
			}
			spin_unlock(&pagecache_lock);
			if (newpage == NULL)
				lru_cache_add(page);
			else
				page_cache_release(newpage);
		}
	}
	return page;
}

/*
 * Same as grab_cache_page, but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 */
struct page *grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
	struct page *page, **hash;

	hash = page_hash(mapping, index);
	page = __find_get_page(mapping, index, hash);

	if ( page ) {
		if ( !TryLockPage(page) ) {
			/* Page found and locked */
			/* This test is overly paranoid, but what the heck... */
			if ( unlikely(page->mapping != mapping || page->index != index) ) {
				/* Someone reallocated this page under us. */
				UnlockPage(page);
				page_cache_release(page);
				return NULL;
			}
			return page;
		} else {
			/* Page locked by someone else */
			page_cache_release(page);
			return NULL;
		}
	}

	page = page_cache_alloc(mapping);
	if ( unlikely(!page) )
		return NULL;	/* Failed to allocate a page */

	if ( unlikely(add_to_page_cache_unique(page, mapping, index, hash)) ) {
		/* Someone else grabbed the page already. */
		page_cache_release(page);
		return NULL;
	}

	return page;
}
#if 0
#define PROFILE_READAHEAD
#define DEBUG_READAHEAD
#endif

/*
 * Read-ahead profiling information
 * --------------------------------
 * Every PROFILE_MAXREADCOUNT, the following information is written
 * to the syslog:
 *   Percentage of asynchronous read-ahead.
 *   Average of read-ahead fields context value.
 * If DEBUG_READAHEAD is defined, a snapshot of these fields is written
 * to the syslog.
 */

#ifdef PROFILE_READAHEAD

#define PROFILE_MAXREADCOUNT 1000

static unsigned long total_reada;
static unsigned long total_async;
static unsigned long total_ramax;
static unsigned long total_ralen;
static unsigned long total_rawin;

static void profile_readahead(int async, struct file *filp)
{
	unsigned long flags;

	++total_reada;
	if (async)
		++total_async;

	total_ramax += filp->f_ramax;
	total_ralen += filp->f_ralen;
	total_rawin += filp->f_rawin;

	if (total_reada > PROFILE_MAXREADCOUNT) {
		save_flags(flags);
		cli();
		if (!(total_reada > PROFILE_MAXREADCOUNT)) {
			restore_flags(flags);
			return;
		}

		printk("Readahead average:  max=%ld, len=%ld, win=%ld, async=%ld%%\n",
			total_ramax/total_reada,
			total_ralen/total_reada,
			total_rawin/total_reada,
			(total_async*100)/total_reada);
#ifdef DEBUG_READAHEAD
		printk("Readahead snapshot: max=%ld, len=%ld, win=%ld, raend=%Ld\n",
			filp->f_ramax, filp->f_ralen, filp->f_rawin, filp->f_raend);
#endif

		total_reada = 0;
		total_async = 0;
		total_ramax = 0;
		total_ralen = 0;
		total_rawin = 0;

		restore_flags(flags);
	}
}
#endif  /* defined PROFILE_READAHEAD */
/*
 * Read-ahead context:
 * -------------------
 * The read-ahead context fields of the "struct file" are the following:
 * - f_raend : position of the first byte after the last page we tried to
 *	       read ahead.
 * - f_ramax : current read-ahead maximum size.
 * - f_ralen : length of the current IO read block we tried to read-ahead.
 * - f_rawin : length of the current read-ahead window.
 *		if last read-ahead was synchronous then
 *			f_rawin = f_ralen
 *		otherwise (was asynchronous)
 *			f_rawin = previous value of f_ralen + f_ralen
 *
 * Read-ahead limits:
 * ------------------
 * MIN_READAHEAD : minimum read-ahead size when reading ahead.
 * MAX_READAHEAD : maximum read-ahead size when reading ahead.
 *
 * Synchronous read-ahead benefits:
 * --------------------------------
 * Using a reasonable IO transfer length from peripheral devices increases
 * system performance.
 * Reasonable means, in this context, not too large but not too small.
 * The actual maximum value is:
 *	MAX_READAHEAD + PAGE_CACHE_SIZE = 76k if CONFIG_READA_SMALL is undefined
 *	and 32K if defined (4K page size assumed).
 *
 * Asynchronous read-ahead benefits:
 * ---------------------------------
 * Overlapping the next read request with user process execution increases
 * system performance.
 *
 * Read-ahead risks:
 * -----------------
 * We have to guess which further data is needed by the user process.
 * If this data is often not really needed, it's bad for system performance.
 * However, we know that files are often accessed sequentially by
 * application programs, so it seems possible to have a reasonably good
 * strategy for that guessing.
 * We only try to read ahead files that seem to be read sequentially.
 *
 * Asynchronous read-ahead risks:
 * ------------------------------
 * In order to maximize overlapping, we must start some asynchronous read
 * request from the device as soon as possible.
 * We must be very careful about:
 * - The number of effective pending IO read requests.
 *   ONE seems to be the only reasonable value.
 * - The total memory pool usage for the file access stream.
 *   This maximum memory usage is implicitly 2 IO read chunks:
 *   2*(MAX_READAHEAD + PAGE_CACHE_SIZE) = 152k if CONFIG_READA_SMALL is undefined,
 *   64k if defined (4K page size assumed).
 */
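/*
 * Illustrative only (not in the original file): one possible sequence for
 * a strictly sequential reader, counted in pages.  After a synchronous
 * read-ahead of 4 pages starting at index 0: f_ralen = 4, f_raend = 4,
 * f_rawin = 4.  When the reader enters that window, an asynchronous
 * read-ahead of, say, 8 pages is issued: f_ralen becomes 8, f_raend 12,
 * and f_rawin = 4 + 8 = 12 (previous f_ralen + new f_ralen).
 */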
static inline int get_max_readahead(struct inode * inode)
{
	if (!inode->i_dev || !max_readahead[MAJOR(inode->i_dev)])
		return vm_max_readahead;
	return max_readahead[MAJOR(inode->i_dev)][MINOR(inode->i_dev)];
}
static void generic_file_readahead(int reada_ok,
	struct file * filp, struct inode * inode,
	struct page * page)
{
	unsigned long end_index;
	unsigned long index = page->index;
	unsigned long max_ahead, ahead;
	unsigned long raend;
	int max_readahead = get_max_readahead(inode);

	end_index = inode->i_size >> PAGE_CACHE_SHIFT;

	raend = filp->f_raend;
	max_ahead = 0;

/*
 * The current page is locked.
 * If the current position is inside the previous read IO request, do not
 * try to reread previously read ahead pages.
 * Otherwise, decide whether or not to read ahead some pages synchronously.
 * If we are not going to read ahead, set the read-ahead context for this
 * page only.
 */
	if (PageLocked(page)) {
		if (!filp->f_ralen || index >= raend || index + filp->f_rawin < raend) {
			raend = index;
			if (raend < end_index)
				max_ahead = filp->f_ramax;
			filp->f_rawin = 0;
			filp->f_ralen = 1;
			if (!max_ahead) {
				filp->f_raend  = index + filp->f_ralen;
				filp->f_rawin += filp->f_ralen;
			}
		}
	}
/*
 * The current page is not locked.
 * If we were reading ahead and,
 * if the current max read-ahead size is not zero and,
 * if the current position is inside the last read-ahead IO request,
 *   it is the moment to try to read ahead asynchronously.
 * We will later force an unplug of the device in order to force
 * asynchronous read IO.
 */
	else if (reada_ok && filp->f_ramax && raend >= 1 &&
		 index <= raend && index + filp->f_ralen >= raend) {
/*
 * Add ONE page to max_ahead in order to try to have about the same IO max size
 * as synchronous read-ahead (MAX_READAHEAD + 1)*PAGE_CACHE_SIZE.
 * Compute the position of the last page we have tried to read in order to
 * begin to read ahead just at the next page.
 */
		raend -= 1;
		if (raend < end_index)
			max_ahead = filp->f_ramax + 1;

		if (max_ahead) {
			filp->f_rawin = filp->f_ralen;
			filp->f_ralen = 0;
			reada_ok      = 2;
		}
	}
/*
 * Try to read ahead pages.
 * We hope that ll_rw_blk() plug/unplug, coalescence, request sorting and
 * the scheduler will work well enough for us to avoid too many bad actual
 * IO requests.
 */
	ahead = 0;
	while (ahead < max_ahead) {
		ahead++;
		if ((raend + ahead) >= end_index)
			break;
		if (page_cache_read(filp, raend + ahead) < 0)
			break;
	}
/*
 * If we tried to read ahead some pages,
 * If we tried to read ahead asynchronously,
 *   Try to force an unplug of the device in order to start an asynchronous
 *   read IO request.
 * Update the read-ahead context.
 * Store the length of the current read-ahead window.
 * Double the current max read-ahead size.
 *   That heuristic avoids doing large IO for files that are not really
 *   accessed sequentially.
 */
	if (ahead) {
		filp->f_ralen += ahead;
		filp->f_rawin += filp->f_ralen;
		filp->f_raend = raend + ahead + 1;

		filp->f_ramax += filp->f_ramax;

		if (filp->f_ramax > max_readahead)
			filp->f_ramax = max_readahead;

#ifdef PROFILE_READAHEAD
		profile_readahead((reada_ok == 2), filp);
#endif
	}

	return;
}
/*
 * Mark a page as having seen activity.
 *
 * If it was already so marked, move it to the active queue and drop
 * the referenced bit.  Otherwise, just mark it for future action..
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && PageReferenced(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else
		SetPageReferenced(page);
}

/*
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level
 * stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 */
void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor)
{
	struct address_space *mapping = filp->f_dentry->d_inode->i_mapping;
	struct inode *inode = mapping->host;
	unsigned long index, offset;
	struct page *cached_page;
	int reada_ok;
	int error;
	int max_readahead = get_max_readahead(inode);

	cached_page = NULL;
	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

/*
 * If the current position is outside the previous read-ahead window,
 * we reset the current read-ahead context and set read-ahead max to zero
 * (will be set to just the needed value later),
 * otherwise, we assume that the file accesses are sequential enough to
 * continue read-ahead.
 */
	if (index > filp->f_raend || index + filp->f_rawin < filp->f_raend) {
		reada_ok = 0;
		filp->f_raend = 0;
		filp->f_ralen = 0;
		filp->f_ramax = 0;
		filp->f_rawin = 0;
	} else {
		reada_ok = 1;
	}
/*
 * Adjust the current value of read-ahead max.
 * If the read operation stays in the first half page, force no readahead.
 * Otherwise try to increase read-ahead max just enough to do the read request.
 * Then, at least MIN_READAHEAD if read-ahead is ok,
 * and at most MAX_READAHEAD in all cases.
 */
	if (!index && offset + desc->count <= (PAGE_CACHE_SIZE >> 1)) {
		filp->f_ramax = 0;
	} else {
		unsigned long needed;

		needed = ((offset + desc->count) >> PAGE_CACHE_SHIFT) + 1;

		if (filp->f_ramax < needed)
			filp->f_ramax = needed;

		if (reada_ok && filp->f_ramax < vm_min_readahead)
			filp->f_ramax = vm_min_readahead;
		if (filp->f_ramax > max_readahead)
			filp->f_ramax = max_readahead;
	}

	for (;;) {
		struct page *page, **hash;
		unsigned long end_index, nr, ret;

		end_index = inode->i_size >> PAGE_CACHE_SHIFT;

		if (index > end_index)
			break;
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = inode->i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		nr = nr - offset;

		/*
		 * Try to find the data in the page cache..
		 */
		hash = page_hash(mapping, index);

		spin_lock(&pagecache_lock);
		page = __find_page_nolock(mapping, index, *hash);
		if (!page)
			goto no_cached_page;
found_page:
		page_cache_get(page);
		spin_unlock(&pagecache_lock);

		if (!Page_Uptodate(page))
			goto page_not_up_to_date;
		generic_file_readahead(reada_ok, filp, inode, page);
page_ok:
		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping->i_mmap_shared != NULL)
			flush_dcache_page(page);

		/*
		 * Mark the page accessed if we read the
		 * beginning or we just did an lseek.
		 */
		if (!offset || !filp->f_reada)
			mark_page_accessed(page);

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		break;
/*
 * Ok, the page was not immediately readable, so let's try to read
 * ahead while we're at it..
 */
page_not_up_to_date:
		generic_file_readahead(reada_ok, filp, inode, page);

		if (Page_Uptodate(page))
			goto page_ok;

		/* Get exclusive access to the page ... */
		lock_page(page);

		/* Did it get unhashed before we got the lock? */
		if (!page->mapping) {
			UnlockPage(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (Page_Uptodate(page)) {
			UnlockPage(page);
			goto page_ok;
		}

readpage:
		/* ... and start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (!error) {
			if (Page_Uptodate(page))
				goto page_ok;

			/* Again, try some read-ahead while waiting for the page to finish.. */
			generic_file_readahead(reada_ok, filp, inode, page);
			wait_on_page(page);
			if (Page_Uptodate(page))
				goto page_ok;
			error = -EIO;
		}

		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		break;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 *
		 * We get here with the page cache lock held.
		 */
		if (!cached_page) {
			spin_unlock(&pagecache_lock);
			cached_page = page_cache_alloc(mapping);
			if (!cached_page) {
				desc->error = -ENOMEM;
				break;
			}

			/*
			 * Somebody may have added the page while we
			 * dropped the page cache lock. Check for that.
			 */
			spin_lock(&pagecache_lock);
			page = __find_page_nolock(mapping, index, *hash);
			if (page)
				goto found_page;
		}

		/*
		 * Ok, add the new page to the hash-queues...
		 */
		page = cached_page;
		__add_to_page_cache(page, mapping, index, hash);
		spin_unlock(&pagecache_lock);
		lru_cache_add(page);
		cached_page = NULL;

		goto readpage;
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	filp->f_reada = 1;
	if (cached_page)
		page_cache_release(cached_page);
	UPDATE_ATIME(inode);
}
static ssize_t generic_file_direct_IO(int rw, struct file * filp, char * buf, size_t count, loff_t offset)
{
	ssize_t retval;
	int new_iobuf, chunk_size, blocksize_mask, blocksize, blocksize_bits, iosize, progress;
	struct kiobuf * iobuf;
	struct address_space * mapping = filp->f_dentry->d_inode->i_mapping;
	struct inode * inode = mapping->host;
	loff_t size = inode->i_size;

	new_iobuf = 0;
	iobuf = filp->f_iobuf;
	if (test_and_set_bit(0, &filp->f_iobuf_lock)) {
		/*
		 * A parallel read/write is using the preallocated iobuf
		 * so just run slow and allocate a new one.
		 */
		retval = alloc_kiovec(1, &iobuf);
		if (retval)
			goto out;
		new_iobuf = 1;
	}

	blocksize = 1 << inode->i_blkbits;
	blocksize_bits = inode->i_blkbits;
	blocksize_mask = blocksize - 1;
	chunk_size = KIO_MAX_ATOMIC_IO << 10;

	retval = -EINVAL;
	if ((offset & blocksize_mask) || (count & blocksize_mask) || ((unsigned long) buf & blocksize_mask))
		goto out_free;
	if (!mapping->a_ops->direct_IO)
		goto out_free;

	if ((rw == READ) && (offset + count > size))
		count = size - offset;

	/*
	 * Flush to disk exclusively the _data_, metadata must remain
	 * completely asynchronous or performance will go to /dev/null.
	 */
	retval = filemap_fdatasync(mapping);
	if (retval == 0)
		retval = fsync_inode_data_buffers(inode);
	if (retval == 0)
		retval = filemap_fdatawait(mapping);
	if (retval < 0)
		goto out_free;

	progress = retval = 0;
	while (count > 0) {
		iosize = count;
		if (iosize > chunk_size)
			iosize = chunk_size;

		retval = map_user_kiobuf(rw, iobuf, (unsigned long) buf, iosize);
		if (retval)
			break;

		retval = mapping->a_ops->direct_IO(rw, inode, iobuf, (offset+progress) >> blocksize_bits, blocksize);

		if (rw == READ && retval > 0)
			mark_dirty_kiobuf(iobuf, retval);

		if (retval >= 0) {
			count -= retval;
			buf += retval;
			/* warning: weird semantics here, we're reporting a read behind the end of the file */
			progress += retval;
		}

		unmap_kiobuf(iobuf);

		if (retval != iosize)
			break;
	}

	if (progress)
		retval = progress;

 out_free:
	if (!new_iobuf)
		clear_bit(0, &filp->f_iobuf_lock);
	else
		free_kiovec(1, &iobuf);
 out:
	return retval;
}
int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;

	if (size > count)
		size = count;

	kaddr = kmap(page);
	left = __copy_to_user(desc->buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
	desc->count = count - size;
	desc->written += size;
	desc->buf += size;
	return size;
}

/*
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t generic_file_read(struct file * filp, char * buf, size_t count, loff_t *ppos)
{
	ssize_t retval;

	if ((ssize_t) count < 0)
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		goto o_direct;

	retval = -EFAULT;
	if (access_ok(VERIFY_WRITE, buf, count)) {
		retval = 0;

		if (count) {
			read_descriptor_t desc;

			desc.written = 0;
			desc.count = count;
			desc.buf = buf;
			desc.error = 0;
			do_generic_file_read(filp, ppos, &desc, file_read_actor);

			retval = desc.written;
			if (!retval)
				retval = desc.error;
		}
	}
 out:
	return retval;

 o_direct:
	{
		loff_t pos = *ppos, size;
		struct address_space *mapping = filp->f_dentry->d_inode->i_mapping;
		struct inode *inode = mapping->host;

		retval = 0;
		if (!count)
			goto out; /* skip atime */
		size = inode->i_size;
		if (pos < size) {
			retval = generic_file_direct_IO(READ, filp, buf, count, pos);
			if (retval > 0)
				*ppos = pos + retval;
		}
		UPDATE_ATIME(filp->f_dentry->d_inode);
		goto out;
	}
}

static int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
{
	ssize_t written;
	unsigned long count = desc->count;
	struct file *file = (struct file *) desc->buf;

	if (size > count)
		size = count;

	if (file->f_op->sendpage) {
		written = file->f_op->sendpage(file, page, offset,
					       size, &file->f_pos, size < count);
	} else {
		char *kaddr;
		mm_segment_t old_fs;

		old_fs = get_fs();
		set_fs(KERNEL_DS);

		kaddr = kmap(page);
		written = file->f_op->write(file, kaddr + offset, size, &file->f_pos);
		kunmap(page);

		set_fs(old_fs);
	}
	if (written < 0) {
		desc->error = written;
		written = 0;
	}
	desc->count = count - written;
	desc->written += written;
	return written;
}
asmlinkage ssize_t sys_sendfile(int out_fd, int in_fd, off_t *offset, size_t count)
{
	ssize_t retval;
	struct file * in_file, * out_file;
	struct inode * in_inode, * out_inode;

	/*
	 * Get input file, and verify that it is ok..
	 */
	retval = -EBADF;
	in_file = fget(in_fd);
	if (!in_file)
		goto out;
	if (!(in_file->f_mode & FMODE_READ))
		goto fput_in;
	retval = -EINVAL;
	in_inode = in_file->f_dentry->d_inode;
	if (!in_inode)
		goto fput_in;
	if (!in_inode->i_mapping->a_ops->readpage)
		goto fput_in;
	retval = locks_verify_area(FLOCK_VERIFY_READ, in_inode, in_file, in_file->f_pos, count);
	if (retval)
		goto fput_in;

	/*
	 * Get output file, and verify that it is ok..
	 */
	retval = -EBADF;
	out_file = fget(out_fd);
	if (!out_file)
		goto fput_in;
	if (!(out_file->f_mode & FMODE_WRITE))
		goto fput_out;
	retval = -EINVAL;
	if (!out_file->f_op || !out_file->f_op->write)
		goto fput_out;
	out_inode = out_file->f_dentry->d_inode;
	retval = locks_verify_area(FLOCK_VERIFY_WRITE, out_inode, out_file, out_file->f_pos, count);
	if (retval)
		goto fput_out;

	retval = 0;
	if (count) {
		read_descriptor_t desc;
		loff_t pos = 0, *ppos;

		retval = -EFAULT;
		ppos = &in_file->f_pos;
		if (offset) {
			if (get_user(pos, offset))
				goto fput_out;
			ppos = &pos;
		}

		desc.written = 0;
		desc.count = count;
		desc.buf = (char *) out_file;
		desc.error = 0;
		do_generic_file_read(in_file, ppos, &desc, file_send_actor);

		retval = desc.written;
		if (!retval)
			retval = desc.error;
		if (offset)
			put_user(pos, offset);
	}

fput_out:
	fput(out_file);
fput_in:
	fput(in_file);
out:
	return retval;
}
static ssize_t do_readahead(struct file *file, unsigned long index, unsigned long nr)
{
	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
	unsigned long max;

	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
		return -EINVAL;

	/* Limit it to the size of the file.. */
	max = (mapping->host->i_size + ~PAGE_CACHE_MASK) >> PAGE_CACHE_SHIFT;
	if (index > max)
		return 0;
	max -= index;
	if (nr > max)
		nr = max;

	/* And limit it to a sane percentage of the inactive list.. */
	max = nr_inactive_pages / 2;
	if (nr > max)
		nr = max;

	while (nr) {
		page_cache_read(file, index);
		index++;
		nr--;
	}
	return 0;
}

asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct file *file;

	ret = -EBADF;
	file = fget(fd);
	if (file) {
		if (file->f_mode & FMODE_READ) {
			unsigned long start = offset >> PAGE_CACHE_SHIFT;
			unsigned long len = (count + ((long)offset & ~PAGE_CACHE_MASK)) >> PAGE_CACHE_SHIFT;
			ret = do_readahead(file, start, len);
		}
		fput(file);
	}
	return ret;
}
/*
 * Read-ahead and flush behind for MADV_SEQUENTIAL areas.  Since we are
 * sure this is sequential access, we don't need a flexible read-ahead
 * window size -- we can always use a large fixed size window.
 */
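/*
 * Illustrative only (not in the original file): with the defaults above
 * (vm_max_readahead = 31) and page_cluster = 4, the fixed window computed
 * below is CLUSTER_OFFSET(31 + 16 - 1) = 32 pages, i.e. 128K of 4K pages.
 */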
static void nopage_sequential_readahead(struct vm_area_struct * vma,
	unsigned long pgoff, unsigned long filesize)
{
	unsigned long ra_window;

	ra_window = get_max_readahead(vma->vm_file->f_dentry->d_inode);
	ra_window = CLUSTER_OFFSET(ra_window + CLUSTER_PAGES - 1);

	/* vm_raend is zero if we haven't read ahead in this area yet. */
	if (vma->vm_raend == 0)
		vma->vm_raend = vma->vm_pgoff + ra_window;

	/*
	 * If we've just faulted the page half-way through our window,
	 * then schedule reads for the next window, and release the
	 * pages in the previous window.
	 */
	if ((pgoff + (ra_window >> 1)) == vma->vm_raend) {
		unsigned long start = vma->vm_pgoff + vma->vm_raend;
		unsigned long end = start + ra_window;

		if (end > ((vma->vm_end >> PAGE_SHIFT) + vma->vm_pgoff))
			end = (vma->vm_end >> PAGE_SHIFT) + vma->vm_pgoff;
		if (start > end)
			return;

		while ((start < end) && (start < filesize)) {
			if (read_cluster_nonblocking(vma->vm_file,
							start, filesize) < 0)
				break;
			start += CLUSTER_PAGES;
		}
		run_task_queue(&tq_disk);

		/* if we're far enough past the beginning of this area,
		   recycle pages that are in the previous window. */
		if (vma->vm_raend > (vma->vm_pgoff + ra_window + ra_window)) {
			unsigned long window = ra_window << PAGE_SHIFT;

			end = vma->vm_start + (vma->vm_raend << PAGE_SHIFT);
			end -= window + window;
			filemap_sync(vma, end - window, window, MS_INVALIDATE);
		}

		vma->vm_raend += ra_window;
	}

	return;
}
/*
 * filemap_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address, int unused)
{
	int error;
	struct file *file = area->vm_file;
	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
	struct inode *inode = mapping->host;
	struct page *page, **hash;
	unsigned long size, pgoff, endoff;

	pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
	endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;

retry_all:
	/*
	 * An external ptracer can access pages that normally aren't
	 * accessible..
	 */
	size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if ((pgoff >= size) && (area->vm_mm == current->mm))
		return NULL;

	/* The "size" of the file, as far as mmap is concerned, isn't bigger than the mapping */
	if (size > endoff)
		size = endoff;

	/*
	 * Do we have something in the page cache already?
	 */
	hash = page_hash(mapping, pgoff);
retry_find:
	page = __find_get_page(mapping, pgoff, hash);
	if (!page)
		goto no_cached_page;

	/*
	 * Ok, found a page in the page cache, now we need to check
	 * that it's up-to-date.
	 */
	if (!Page_Uptodate(page))
		goto page_not_uptodate;

success:
	/*
	 * Try read-ahead for sequential areas.
	 */
	if (VM_SequentialReadHint(area))
		nopage_sequential_readahead(area, pgoff, size);

	/*
	 * Found the page and have a reference on it, need to check sharing
	 * and possibly copy it over to another page..
	 */
	mark_page_accessed(page);
	flush_page_to_ram(page);
	return page;
no_cached_page:
	/*
	 * If the requested offset is within our file, try to read a whole
	 * cluster of pages at once.
	 *
	 * Otherwise, we're off the end of a privately mapped file,
	 * so we need to map a zero page.
	 */
	if ((pgoff < size) && !VM_RandomReadHint(area))
		error = read_cluster_nonblocking(file, pgoff, size);
	else
		error = page_cache_read(file, pgoff);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return NOPAGE_OOM;
	return NULL;

page_not_uptodate:
	lock_page(page);

	/* Did it get unhashed while we waited for it? */
	if (!page->mapping) {
		UnlockPage(page);
		page_cache_release(page);
		goto retry_all;
	}

	/* Did somebody else get it up-to-date? */
	if (Page_Uptodate(page)) {
		UnlockPage(page);
		goto success;
	}

	if (!mapping->a_ops->readpage(file, page)) {
		wait_on_page(page);
		if (Page_Uptodate(page))
			goto success;
	}

	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	lock_page(page);

	/* Somebody truncated the page on us? */
	if (!page->mapping) {
		UnlockPage(page);
		page_cache_release(page);
		goto retry_all;
	}

	/* Somebody else successfully read it in? */
	if (Page_Uptodate(page)) {
		UnlockPage(page);
		goto success;
	}
	ClearPageError(page);
	if (!mapping->a_ops->readpage(file, page)) {
		wait_on_page(page);
		if (Page_Uptodate(page))
			goto success;
	}

	/*
	 * Things didn't work out. Return zero to tell the
	 * mm layer so, possibly freeing the page cache page first.
	 */
	page_cache_release(page);
	return NULL;
}
/* Called with mm->page_table_lock held to protect against other
 * threads/the swapper from ripping pte's out from under us.
 */
static inline int filemap_sync_pte(pte_t * ptep, struct vm_area_struct *vma,
	unsigned long address, unsigned int flags)
{
	pte_t pte = *ptep;

	if (pte_present(pte)) {
		struct page *page = pte_page(pte);
		if (VALID_PAGE(page) && !PageReserved(page) && ptep_test_and_clear_dirty(ptep)) {
			flush_tlb_page(vma, address);
			set_page_dirty(page);
		}
	}
	return 0;
}

static inline int filemap_sync_pte_range(pmd_t * pmd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned long offset, unsigned int flags)
{
	pte_t * pte;
	unsigned long end;
	int error;

	if (pmd_none(*pmd))
		return 0;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	pte = pte_offset(pmd, address);
	offset += address & PMD_MASK;
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte(pte, vma, address + offset, flags);
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	return error;
}

static inline int filemap_sync_pmd_range(pgd_t * pgd,
	unsigned long address, unsigned long size,
	struct vm_area_struct *vma, unsigned int flags)
{
	pmd_t * pmd;
	unsigned long offset, end;
	int error;

	if (pgd_none(*pgd))
		return 0;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		return 0;
	}
	pmd = pmd_offset(pgd, address);
	offset = address & PGDIR_MASK;
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pte_range(pmd, address, end - address, vma, offset, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return error;
}
int filemap_sync(struct vm_area_struct * vma, unsigned long address,
	size_t size, unsigned int flags)
{
	pgd_t * dir;
	unsigned long end = address + size;
	int error = 0;

	/* Acquire the lock early; it may be possible to avoid dropping
	 * and reacquiring it repeatedly.
	 */
	spin_lock(&vma->vm_mm->page_table_lock);

	dir = pgd_offset(vma->vm_mm, address);
	flush_cache_range(vma->vm_mm, end - size, end);
	do {
		error |= filemap_sync_pmd_range(dir, address, end - address, vma, flags);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_range(vma->vm_mm, end - size, end);

	spin_unlock(&vma->vm_mm->page_table_lock);

	return error;
}
static struct vm_operations_struct generic_file_vm_ops = {
	nopage:		filemap_nopage,
};

/* This is used for a general mmap of a disk file */
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
	struct inode *inode = mapping->host;

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		if (!mapping->a_ops->writepage)
			return -EINVAL;
	}
	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	UPDATE_ATIME(inode);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}
/*
 * The msync() system call.
 *
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC initiates writeout of just the dirty mapped data.
 * This provides no guarantee of file integrity - things like indirect
 * blocks may not have started writeout.  MS_ASYNC is primarily useful
 * where the application knows that it has finished with the data and
 * wishes to intelligently schedule its own I/O traffic.
 */
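/*
 * Illustrative userspace usage (not part of this file):
 *
 *	void *map = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	memcpy(map, data, len);
 *	msync(map, len, MS_SYNC);	blocks until the data is on disk
 *	msync(map, len, MS_ASYNC);	only starts the writeout
 */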
static int msync_interval(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int flags)
{
	int ret = 0;
	struct file * file = vma->vm_file;

	if ( (flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED) )
		return -EBUSY;

	if (file && (vma->vm_flags & VM_SHARED)) {
		ret = filemap_sync(vma, start, end-start, flags);

		if (!ret && (flags & (MS_SYNC|MS_ASYNC))) {
			struct inode * inode = file->f_dentry->d_inode;

			down(&inode->i_sem);
			ret = filemap_fdatasync(inode->i_mapping);
			if (flags & MS_SYNC) {
				int err;

				if (file->f_op && file->f_op->fsync) {
					err = file->f_op->fsync(file, file->f_dentry, 1);
					if (err && !ret)
						ret = err;
				}
				err = filemap_fdatawait(inode->i_mapping);
				if (err && !ret)
					ret = err;
			}
			up(&inode->i_sem);
		}
	}
	return ret;
}
asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
{
	unsigned long end;
	struct vm_area_struct * vma;
	int unmapped_error, error = -EINVAL;

	down_read(&current->mm->mmap_sem);
	if (start & ~PAGE_MASK)
		goto out;
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		goto out;
	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = 0;
	if (end == start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end.
	 */
	vma = find_vma(current->mm, start);
	unmapped_error = 0;
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if (end <= vma->vm_end) {
			if (start < end) {
				error = msync_interval(vma, start, end, flags);
				if (error)
					goto out;
			}
			error = unmapped_error;
			goto out;
		}
		/* Here vma->vm_start <= start < vma->vm_end < end. */
		error = msync_interval(vma, start, vma->vm_end, flags);
		if (error)
			goto out;
		start = vma->vm_end;
		vma = vma->vm_next;
	}
out:
	up_read(&current->mm->mmap_sem);
	return error;
}
static inline void setup_read_behavior(struct vm_area_struct * vma,
	int behavior)
{
	VM_ClearReadHint(vma);
	switch (behavior) {
	case MADV_SEQUENTIAL:
		vma->vm_flags |= VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		vma->vm_flags |= VM_RAND_READ;
		break;
	default:
		break;
	}
	return;
}

static long madvise_fixup_start(struct vm_area_struct * vma,
	unsigned long end, int behavior)
{
	struct vm_area_struct * n;
	struct mm_struct * mm = vma->vm_mm;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -EAGAIN;
	*n = *vma;
	n->vm_end = end;
	setup_read_behavior(n, behavior);
	n->vm_raend = 0;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	vma->vm_pgoff += (end - vma->vm_start) >> PAGE_SHIFT;
	lock_vma_mappings(vma);
	spin_lock(&mm->page_table_lock);
	vma->vm_start = end;
	__insert_vm_struct(mm, n);
	spin_unlock(&mm->page_table_lock);
	unlock_vma_mappings(vma);
	return 0;
}

static long madvise_fixup_end(struct vm_area_struct * vma,
	unsigned long start, int behavior)
{
	struct vm_area_struct * n;
	struct mm_struct * mm = vma->vm_mm;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -EAGAIN;
	*n = *vma;
	n->vm_start = start;
	n->vm_pgoff += (n->vm_start - vma->vm_start) >> PAGE_SHIFT;
	setup_read_behavior(n, behavior);
	n->vm_raend = 0;
	if (n->vm_file)
		get_file(n->vm_file);
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	lock_vma_mappings(vma);
	spin_lock(&mm->page_table_lock);
	vma->vm_end = start;
	__insert_vm_struct(mm, n);
	spin_unlock(&mm->page_table_lock);
	unlock_vma_mappings(vma);
	return 0;
}

static long madvise_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int behavior)
{
	struct vm_area_struct * left, * right;
	struct mm_struct * mm = vma->vm_mm;

	left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!left)
		return -EAGAIN;
	right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!right) {
		kmem_cache_free(vm_area_cachep, left);
		return -EAGAIN;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	right->vm_start = end;
	right->vm_pgoff += (right->vm_start - left->vm_start) >> PAGE_SHIFT;
	left->vm_raend = 0;
	right->vm_raend = 0;
	if (vma->vm_file)
		atomic_add(2, &vma->vm_file->f_count);

	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	vma->vm_pgoff += (start - vma->vm_start) >> PAGE_SHIFT;
	vma->vm_raend = 0;
	lock_vma_mappings(vma);
	spin_lock(&mm->page_table_lock);
	vma->vm_start = start;
	vma->vm_end = end;
	setup_read_behavior(vma, behavior);
	__insert_vm_struct(mm, left);
	__insert_vm_struct(mm, right);
	spin_unlock(&mm->page_table_lock);
	unlock_vma_mappings(vma);
	return 0;
}
/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int behavior)
{
	int error = 0;

	/* This caps the number of vma's this process can own */
	if (vma->vm_mm->map_count > max_map_count)
		return -ENOMEM;

	if (start == vma->vm_start) {
		if (end == vma->vm_end) {
			setup_read_behavior(vma, behavior);
			vma->vm_raend = 0;
		} else
			error = madvise_fixup_start(vma, end, behavior);
	} else {
		if (end == vma->vm_end)
			error = madvise_fixup_end(vma, start, behavior);
		else
			error = madvise_fixup_middle(vma, start, end, behavior);
	}

	return error;
}

/*
 * Schedule all required I/O operations, then run the disk queue
 * to make sure they are started.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct * vma,
	unsigned long start, unsigned long end)
{
	long error = -EBADF;
	struct file * file;
	unsigned long size, rlim_rss;

	/* Doesn't work if there's no mapped file. */
	if (!vma->vm_file)
		return error;
	file = vma->vm_file;
	size = (file->f_dentry->d_inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;

	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	/* Make sure this doesn't exceed the process's max rss. */
	error = -EIO;
	rlim_rss = current->rlim ? current->rlim[RLIMIT_RSS].rlim_cur :
				LONG_MAX; /* default: see resource.h */
	if ((vma->vm_mm->rss + (end - start)) > rlim_rss)
		return error;

	/* round to cluster boundaries if this isn't a "random" area. */
	if (!VM_RandomReadHint(vma)) {
		start = CLUSTER_OFFSET(start);
		end = CLUSTER_OFFSET(end + CLUSTER_PAGES - 1);

		while ((start < end) && (start < size)) {
			error = read_cluster_nonblocking(file, start, size);
			start += CLUSTER_PAGES;
			if (error < 0)
				break;
		}
	} else {
		while ((start < end) && (start < size)) {
			error = page_cache_read(file, start);
			start++;
			if (error < 0)
				break;
		}
	}

	/* Don't wait for someone else to push these requests. */
	run_task_queue(&tq_disk);

	return 0;
}
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for refill_inactive to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * refill_inactive to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
2527 static long madvise_dontneed(struct vm_area_struct * vma,
2528 unsigned long start, unsigned long end)
2530 if (vma->vm_flags & VM_LOCKED)
2531 return -EINVAL;
2533 zap_page_range(vma->vm_mm, start, end - start);
2534 return 0;
2535 }
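/*
 * Usage sketch (editor's addition, userspace, illustrative only):
 * the database case described above, discarding committed scratch
 * data instead of letting it reach swap.  The helpers named here
 * are hypothetical:
 *
 *	char *scratch = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	build_transaction(scratch, len);	// hypothetical
 *	commit_to_backing_store(scratch);	// hypothetical
 *	madvise(scratch, len, MADV_DONTNEED);	// discard, don't swap
 */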
2537 static long madvise_vma(struct vm_area_struct * vma, unsigned long start,
2538 unsigned long end, int behavior)
2540 long error = -EBADF;
2542 switch (behavior) {
2543 case MADV_NORMAL: case MADV_RANDOM:
2544 case MADV_SEQUENTIAL:
2546 error = madvise_behavior(vma, start, end, behavior);
2547 break;
2549 case MADV_WILLNEED:
2550 error = madvise_willneed(vma, start, end);
2551 break;
2553 case MADV_DONTNEED:
2554 error = madvise_dontneed(vma, start, end);
2555 break;
2557 default:
2558 error = -EINVAL;
2559 }
2561 return error;
2562 }
2566 * The madvise(2) system call.
2568 * Applications can use madvise() to advise the kernel how it should
2569 * handle paging I/O in this VM area. The idea is to help the kernel
2570 * use appropriate read-ahead and caching techniques. The information
2571 * provided is advisory only, and can be safely disregarded by the
2572 * kernel without affecting the correct operation of the application.
2575 * MADV_NORMAL - the default behavior is to read clusters. This
2576 * results in some read-ahead and read-behind.
2577 * MADV_RANDOM - the system should read the minimum amount of data
2578 * on any access, since it is unlikely that the appli-
2579 * cation will need more than what it asks for.
2580 * MADV_SEQUENTIAL - pages in the given range will probably be accessed
2581 * once, so they can be aggressively read ahead, and
2582 * can be freed soon after they are accessed.
2583 * MADV_WILLNEED - the application is notifying the system to read
2584 * some pages ahead.
2585 * MADV_DONTNEED - the application is finished with the given range,
2586 * so the kernel can free resources associated with it.
2590 * -EINVAL - start + len < 0, start is not page-aligned,
2591 * "behavior" is not a valid value, or application
2592 * is attempting to release locked or shared pages.
2593 * -ENOMEM - addresses in the specified range are not currently
2594 * mapped, or are outside the AS of the process.
2595 * -EIO - an I/O error occurred while paging in data.
2596 * -EBADF - map exists, but area maps something that isn't a file.
2597 * -EAGAIN - a kernel resource was temporarily unavailable.
2599 asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior)
2602 struct vm_area_struct * vma;
2603 int unmapped_error = 0;
2604 int error = -EINVAL;
2606 down_write(&current->mm->mmap_sem);
2608 if (start & ~PAGE_MASK)
2609 goto out;
2610 len = (len + ~PAGE_MASK) & PAGE_MASK;
2611 end = start + len;
2612 if (end < start)
2613 goto out;
2615 error = 0;
2616 if (end == start)
2617 goto out;
2620 * If the interval [start,end) covers some unmapped address
2621 * ranges, just ignore them, but return -ENOMEM at the end.
2623 vma = find_vma(current->mm, start);
2624 for (;;) {
2625 /* Still start < end. */
2626 error = -ENOMEM;
2627 if (!vma)
2628 goto out;
2630 /* Here start < vma->vm_end. */
2631 if (start < vma->vm_start) {
2632 unmapped_error = -ENOMEM;
2633 start = vma->vm_start;
2636 /* Here vma->vm_start <= start < vma->vm_end. */
2637 if (end <= vma->vm_end) {
2638 if (start < end) {
2639 error = madvise_vma(vma, start, end,
2640 behavior);
2641 if (error)
2642 goto out;
2643 }
2644 error = unmapped_error;
2645 break;
2646 }
2648 /* Here vma->vm_start <= start < vma->vm_end < end. */
2649 error = madvise_vma(vma, start, vma->vm_end, behavior);
2650 if (error)
2651 goto out;
2652 start = vma->vm_end;
2653 vma = vma->vm_next;
2654 }
2656 out:
2657 up_write(&current->mm->mmap_sem);
2658 return error;
2659 }
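/*
 * Usage sketch (editor's addition, userspace, illustrative only):
 * a sequential scan over a mapped file using the hints above.
 * "path", "len" and scan() are assumptions, not part of this file:
 *
 *	int fd = open(path, O_RDONLY);
 *	void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(p, len, MADV_SEQUENTIAL);	// aggressive readahead
 *	scan(p, len);				// hypothetical scan
 *	madvise(p, len, MADV_DONTNEED);		// pages can be freed
 */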
2662 * Later we can get more picky about what "in core" means precisely.
2663 * For now, simply check to see if the page is in the page cache,
2664 * and is up to date; i.e. that no page-in operation would be required
2665 * at this time if an application were to map and access this page.
2667 static unsigned char mincore_page(struct vm_area_struct * vma,
2668 unsigned long pgoff)
2670 unsigned char present = 0;
2671 struct address_space * as = vma->vm_file->f_dentry->d_inode->i_mapping;
2672 struct page * page, ** hash = page_hash(as, pgoff);
2674 spin_lock(&pagecache_lock);
2675 page = __find_page_nolock(as, pgoff, *hash);
2676 if ((page) && (Page_Uptodate(page)))
2677 present = 1;
2678 spin_unlock(&pagecache_lock);
2680 return present;
2681 }
2683 static long mincore_vma(struct vm_area_struct * vma,
2684 unsigned long start, unsigned long end, unsigned char * vec)
2686 long error, i, remaining;
2687 unsigned char * tmp;
2693 start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2694 if (end > vma->vm_end)
2695 end = vma->vm_end;
2696 end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2699 tmp = (unsigned char *) __get_free_page(GFP_KERNEL);
2703 /* (end - start) is # of pages, and also # of bytes in "vec" */
2704 remaining = (end - start);
2706 error = 0;
2707 for (i = 0; remaining > 0; remaining -= PAGE_SIZE, i++) {
2708 int j = 0;
2709 long thispiece = (remaining < PAGE_SIZE) ?
2710 remaining : PAGE_SIZE;
2712 while (j < thispiece)
2713 tmp[j++] = mincore_page(vma, start++);
2715 if (copy_to_user(vec + PAGE_SIZE * i, tmp, thispiece)) {
2716 error = -EFAULT;
2717 break;
2718 }
2719 }
2721 free_page((unsigned long) tmp);
2722 return error;
2723 }
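/*
 * Worked example (editor's addition): the loop above emits one byte
 * per page and stages at most PAGE_SIZE bytes in "tmp" per pass.
 * With 4K pages, a 5000-page range therefore takes two passes: one
 * full 4096-byte copy_to_user() and a 904-byte tail.
 */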
2726 * The mincore(2) system call.
2728 * mincore() returns the memory residency status of the pages in the
2729 * current process's address space specified by [addr, addr + len).
2730 * The status is returned in a vector of bytes. The least significant
2731 * bit of each byte is 1 if the referenced page is in memory, otherwise
2732 * it is zero.
2734 * Because the status of a page can change after mincore() checks it
2735 * but before it returns to the application, the returned vector may
2736 * contain stale information. Only locked pages are guaranteed to
2737 * remain in memory.
2741 * -EFAULT - vec points to an illegal address
2742 * -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE,
2743 * or len has a nonpositive value
2744 * -ENOMEM - Addresses in the range [addr, addr + len] are
2745 * invalid for the address space of this process, or
2746 * specify one or more pages which are not currently
2747 * mapped.
2748 * -EAGAIN - A kernel resource was temporarily unavailable.
2750 asmlinkage long sys_mincore(unsigned long start, size_t len,
2751 unsigned char * vec)
2755 struct vm_area_struct * vma;
2756 int unmapped_error = 0;
2757 long error = -EINVAL;
2759 down_read(&current->mm->mmap_sem);
2761 if (start & ~PAGE_CACHE_MASK)
2762 goto out;
2763 len = (len + ~PAGE_CACHE_MASK) & PAGE_CACHE_MASK;
2764 end = start + len;
2765 if (end < start)
2766 goto out;
2768 error = 0;
2769 if (end == start)
2770 goto out;
2773 * If the interval [start,end) covers some unmapped address
2774 * ranges, just ignore them, but return -ENOMEM at the end.
2776 vma = find_vma(current->mm, start);
2777 for (;;) {
2778 /* Still start < end. */
2779 error = -ENOMEM;
2780 if (!vma)
2781 goto out;
2783 /* Here start < vma->vm_end. */
2784 if (start < vma->vm_start) {
2785 unmapped_error = -ENOMEM;
2786 start = vma->vm_start;
2789 /* Here vma->vm_start <= start < vma->vm_end. */
2790 if (end <= vma->vm_end) {
2791 if (start < end) {
2792 error = mincore_vma(vma, start, end,
2793 &vec[index]);
2794 if (error)
2795 goto out;
2796 }
2797 error = unmapped_error;
2798 break;
2799 }
2801 /* Here vma->vm_start <= start < vma->vm_end < end. */
2802 error = mincore_vma(vma, start, vma->vm_end, &vec[index]);
2803 if (error)
2804 goto out;
2805 index += (vma->vm_end - start) >> PAGE_CACHE_SHIFT;
2806 start = vma->vm_end;
2807 vma = vma->vm_next;
2808 }
2810 out:
2811 up_read(&current->mm->mmap_sem);
2812 return error;
2813 }
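/*
 * Usage sketch (editor's addition, userspace, illustrative only);
 * prefault() is a hypothetical helper:
 *
 *	long psz = getpagesize();
 *	size_t pages = (len + psz - 1) / psz;
 *	unsigned char *vec = malloc(pages);
 *	unsigned long i;
 *
 *	if (mincore(addr, len, vec) == 0)
 *		for (i = 0; i < pages; i++)
 *			if (!(vec[i] & 1))
 *				prefault((char *)addr + i * psz);
 *	free(vec);
 */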
2816 struct page *__read_cache_page(struct address_space *mapping,
2817 unsigned long index,
2818 int (*filler)(void *,struct page*),
2821 struct page **hash = page_hash(mapping, index);
2822 struct page *page, *cached_page = NULL;
2823 int err;
2824 repeat:
2825 page = __find_get_page(mapping, index, hash);
2826 if (!page) {
2827 if (!cached_page) {
2828 cached_page = page_cache_alloc(mapping);
2829 if (!cached_page)
2830 return ERR_PTR(-ENOMEM);
2831 }
2832 page = cached_page;
2833 if (add_to_page_cache_unique(page, mapping, index, hash))
2834 goto repeat;
2836 err = filler(data, page);
2837 if (err < 0) {
2838 page_cache_release(page);
2839 page = ERR_PTR(err);
2840 }
2841 }
2842 if (cached_page)
2843 page_cache_release(cached_page);
2844 return page;
2845 }
2848 * Read into the page cache. If a page already exists,
2849 * and Page_Uptodate() is not set, try to fill the page.
2851 struct page *read_cache_page(struct address_space *mapping,
2852 unsigned long index,
2853 int (*filler)(void *,struct page*),
2859 retry:
2860 page = __read_cache_page(mapping, index, filler, data);
2861 if (IS_ERR(page))
2862 goto out;
2863 mark_page_accessed(page);
2864 if (Page_Uptodate(page))
2865 goto out;
2867 lock_page(page);
2868 if (!page->mapping) {
2869 UnlockPage(page);
2870 page_cache_release(page);
2871 goto retry;
2872 }
2873 if (Page_Uptodate(page)) {
2874 UnlockPage(page);
2875 goto out;
2876 }
2877 err = filler(data, page);
2878 if (err < 0) {
2879 page_cache_release(page);
2880 page = ERR_PTR(err);
2881 }
2882 out:
2883 return page;
2884 }
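/*
 * Usage sketch (editor's addition, names hypothetical): a typical
 * caller passes its readpage-style routine as the filler:
 *
 *	static int my_filler(void *data, struct page *page)
 *	{
 *		return my_readpage((struct file *)data, page);
 *	}
 *
 *	page = read_cache_page(mapping, index, my_filler, file);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...use the now-uptodate page...
 *	page_cache_release(page);
 */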
2886 static inline struct page * __grab_cache_page(struct address_space *mapping,
2887 unsigned long index, struct page **cached_page)
2889 struct page *page, **hash = page_hash(mapping, index);
2890 repeat:
2891 page = __find_lock_page(mapping, index, hash);
2892 if (!page) {
2893 if (!*cached_page) {
2894 *cached_page = page_cache_alloc(mapping);
2895 if (!*cached_page)
2896 return NULL;
2897 }
2898 page = *cached_page;
2899 if (add_to_page_cache_unique(page, mapping, index, hash))
2900 goto repeat;
2901 *cached_page = NULL;
2902 }
2903 return page;
2904 }
2906 inline void remove_suid(struct inode *inode)
2910 /* set S_ISGID if S_IXGRP is set, and always set S_ISUID */
2911 mode = (inode->i_mode & S_IXGRP)*(S_ISGID/S_IXGRP) | S_ISUID;
2913 /* was any of the uid bits set? */
2914 mode &= inode->i_mode;
2915 if (mode && !capable(CAP_FSETID)) {
2916 inode->i_mode &= ~mode;
2917 mark_inode_dirty(inode);
2918 }
2919 }
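/*
 * Worked example (editor's addition): the multiply above is a
 * branch-free conditional.  S_ISGID/S_IXGRP == 02000/00010 == 0200,
 * so (i_mode & S_IXGRP) * 0200 yields S_ISGID exactly when the
 * group-execute bit is set (setgid without group-execute marks
 * mandatory locking and must not be cleared), and S_ISUID is OR-ed
 * in unconditionally.  The final mask with i_mode keeps only the
 * set-id bits actually present, so a write() by an unprivileged
 * user strips setuid/setgid.
 */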
2922 * Write to a file through the page cache.
2924 * We currently put everything into the page cache prior to writing it.
2925 * This is not a problem when writing full pages. With partial pages,
2926 * however, we first have to read the data into the cache, then
2927 * dirty the page, and finally schedule it for writing. Alternatively, we
2928 * could write-through just the portion of data that would go into that
2929 * page, but that would kill performance for applications that write data
2930 * line by line, and it's prone to race conditions.
2932 * Note that this routine doesn't try to keep track of dirty pages. Each
2933 * file system has to do this all by itself, unfortunately.
2937 generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
2939 struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
2940 struct inode *inode = mapping->host;
2941 unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
2943 struct page *page, *cached_page;
2949 if ((ssize_t) count < 0)
2950 return -EINVAL;
2952 if (!access_ok(VERIFY_READ, buf, count))
2953 return -EFAULT;
2957 down(&inode->i_sem);
2964 err = file->f_error;
2972 /* FIXME: this is for backwards compatibility with 2.4 */
2973 if (!S_ISBLK(inode->i_mode) && file->f_flags & O_APPEND)
2974 pos = inode->i_size;
2977 * Check whether we've reached the file size limit.
2981 if (!S_ISBLK(inode->i_mode) && limit != RLIM_INFINITY) {
2983 send_sig(SIGXFSZ, current, 0);
2986 if (pos > 0xFFFFFFFFULL || count > limit - (u32)pos) {
2987 /* send_sig(SIGXFSZ, current, 0); */
2988 count = limit - (u32)pos;
2995 if ( pos + count > MAX_NON_LFS && !(file->f_flags&O_LARGEFILE)) {
2996 if (pos >= MAX_NON_LFS) {
2997 send_sig(SIGXFSZ, current, 0);
3000 if (count > MAX_NON_LFS - (u32)pos) {
3001 /* send_sig(SIGXFSZ, current, 0); */
3002 count = MAX_NON_LFS - (u32)pos;
3007 * Are we about to exceed the fs block limit ?
3009 * If we have written data it becomes a short write.
3010 * If we have exceeded the limit without writing data we send
3011 * a signal and give them an EFBIG.
3013 * Linus' frestrict idea will clean these up nicely..
3016 if (!S_ISBLK(inode->i_mode)) {
3017 if (pos >= inode->i_sb->s_maxbytes)
3019 if (count || pos > inode->i_sb->s_maxbytes) {
3020 send_sig(SIGXFSZ, current, 0);
3024 /* zero-length writes at ->s_maxbytes are OK */
3027 if (pos + count > inode->i_sb->s_maxbytes)
3028 count = inode->i_sb->s_maxbytes - pos;
3030 if (is_read_only(inode->i_rdev)) {
3034 if (pos >= inode->i_size) {
3035 if (count || pos > inode->i_size) {
3041 if (pos + count > inode->i_size)
3042 count = inode->i_size - pos;
3050 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
3051 mark_inode_dirty_sync(inode);
3053 if (file->f_flags & O_DIRECT)
3054 goto o_direct;
3057 unsigned long index, offset;
3062 * Try to find the page in the cache. If it isn't there,
3063 * allocate a free page.
3065 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
3066 index = pos >> PAGE_CACHE_SHIFT;
3067 bytes = PAGE_CACHE_SIZE - offset;
3072 * Bring in the user page that we will copy from _first_.
3073 * Otherwise there's a nasty deadlock on copying from the
3074 * same page as we're writing to, without it being marked
3077 { volatile unsigned char dummy;
3078 __get_user(dummy, buf);
3079 __get_user(dummy, buf+bytes-1);
3080 }
3082 status = -ENOMEM; /* we'll assign it later anyway */
3083 page = __grab_cache_page(mapping, index, &cached_page);
3087 /* We have exclusive IO access to the page.. */
3088 if (!PageLocked(page)) {
3089 PAGE_BUG(page);
3090 }
3092 kaddr = kmap(page);
3093 status = mapping->a_ops->prepare_write(file, page, offset, offset+bytes);
3094 if (status)
3095 goto sync_failure;
3096 page_fault = __copy_from_user(kaddr+offset, buf, bytes);
3097 flush_dcache_page(page);
3098 status = mapping->a_ops->commit_write(file, page, offset, offset+bytes);
3111 kunmap(page);
3112 /* Mark it unlocked again and drop the page.. */
3113 SetPageReferenced(page);
3114 UnlockPage(page);
3115 page_cache_release(page);
3124 page_cache_release(cached_page);
3126 /* For now, when the user asks for O_SYNC, we'll actually
3127 * provide O_DSYNC. */
3129 if ((file->f_flags & O_SYNC) || IS_SYNC(inode))
3130 status = generic_osync_inode(inode, OSYNC_METADATA|OSYNC_DATA);
3134 err = written ? written : status;
3145 * If blocksize < pagesize, prepare_write() may have instantiated a
3146 * few blocks outside i_size. Trim these off again.
3150 page_cache_release(page);
3151 if (pos + bytes > inode->i_size)
3152 vmtruncate(inode, inode->i_size);
3156 written = generic_file_direct_IO(WRITE, file, (char *) buf, count, pos);
3158 loff_t end = pos + written;
3159 if (end > inode->i_size && !S_ISBLK(inode->i_mode)) {
3160 inode->i_size = end;
3161 mark_inode_dirty(inode);
3164 invalidate_inode_pages2(mapping);
3167 * Sync the fs metadata but not the minor inode changes and
3168 * of course not the data as we did direct DMA for the IO.
3170 if (written >= 0 && file->f_flags & O_SYNC)
3171 status = generic_osync_inode(inode, OSYNC_METADATA);
3173 goto out_status;
3174 }
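/*
 * Editor's sketch (illustrative, not in the original source) of the
 * per-page cycle the buffered write loop above drives through the
 * address_space operations:
 *
 *	page = __grab_cache_page(mapping, index, &cached_page);
 *	kaddr = kmap(page);
 *	a_ops->prepare_write(file, page, offset, offset + bytes);
 *		-- fs readies buffers, reading a partial page if needed
 *	__copy_from_user(kaddr + offset, buf, bytes);
 *	a_ops->commit_write(file, page, offset, offset + bytes);
 *		-- fs marks buffers dirty and may extend i_size
 *	kunmap(page);
 *	UnlockPage(page);
 *	page_cache_release(page);
 */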
3175 void __init page_cache_init(unsigned long mempages)
3177 unsigned long htable_size, order;
3179 htable_size = mempages;
3180 htable_size *= sizeof(struct page *);
3181 for(order = 0; (PAGE_SIZE << order) < htable_size; order++)
3182 ;
3184 do {
3185 unsigned long tmp = (PAGE_SIZE << order) / sizeof(struct page *);
3187 page_hash_bits = 0;
3188 while((tmp >>= 1UL) != 0UL)
3189 page_hash_bits++;
3191 page_hash_table = (struct page **)
3192 __get_free_pages(GFP_ATOMIC, order);
3193 } while(page_hash_table == NULL && --order > 0);
3195 printk("Page-cache hash table entries: %d (order: %ld, %ld bytes)\n",
3196 (1 << page_hash_bits), order, (PAGE_SIZE << order));
3197 if (!page_hash_table)
3198 panic("Failed to allocate page hash table\n");
3199 memset((void *)page_hash_table, 0, PAGE_HASH_SIZE * sizeof(struct page *));
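/*
 * Worked example (editor's addition): on a 32-bit box with 128MB of
 * RAM and 4K pages, mempages == 32768, so htable_size == 32768 *
 * sizeof(struct page *) == 128K.  The for loop above settles on
 * order 5 (PAGE_SIZE << 5 == 128K), giving 32768 bucket pointers
 * and page_hash_bits == 15.
 */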