diff --git a/fs/buffer.c b/fs/buffer.c
index 188365c..6a25d7d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -278,7 +278,7 @@ EXPORT_SYMBOL(thaw_bdev);
  */
 static void do_sync(unsigned long wait)
 {
-       wakeup_bdflush(0);
+       wakeup_pdflush(0);
        sync_inodes(0);         /* All mappings, inodes and their blockdevs */
        DQUOT_SYNC(NULL);
        sync_supers();          /* Write the superblocks */
@@ -331,7 +331,7 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
        return ret;
 }
 
-asmlinkage long sys_fsync(unsigned int fd)
+static long do_fsync(unsigned int fd, int datasync)
 {
        struct file * file;
        struct address_space *mapping;
@@ -342,14 +342,14 @@ asmlinkage long sys_fsync(unsigned int fd)
        if (!file)
                goto out;
 
-       mapping = file->f_mapping;
-
        ret = -EINVAL;
        if (!file->f_op || !file->f_op->fsync) {
                /* Why?  We can still call filemap_fdatawrite */
                goto out_putf;
        }
 
+       mapping = file->f_mapping;
+
        current->flags |= PF_SYNCWRITE;
        ret = filemap_fdatawrite(mapping);
 
@@ -358,7 +358,7 @@ asmlinkage long sys_fsync(unsigned int fd)
         * which could cause livelocks in fsync_buffers_list
         */
        down(&mapping->host->i_sem);
-       err = file->f_op->fsync(file, file->f_dentry, 0);
+       err = file->f_op->fsync(file, file->f_dentry, datasync);
        if (!ret)
                ret = err;
        up(&mapping->host->i_sem);
@@ -373,39 +373,14 @@ out:
        return ret;
 }
 
-asmlinkage long sys_fdatasync(unsigned int fd)
+asmlinkage long sys_fsync(unsigned int fd)
 {
-       struct file * file;
-       struct address_space *mapping;
-       int ret, err;
-
-       ret = -EBADF;
-       file = fget(fd);
-       if (!file)
-               goto out;
-
-       ret = -EINVAL;
-       if (!file->f_op || !file->f_op->fsync)
-               goto out_putf;
-
-       mapping = file->f_mapping;
-
-       current->flags |= PF_SYNCWRITE;
-       ret = filemap_fdatawrite(mapping);
-       down(&mapping->host->i_sem);
-       err = file->f_op->fsync(file, file->f_dentry, 1);
-       if (!ret)
-               ret = err;
-       up(&mapping->host->i_sem);
-       err = filemap_fdatawait(mapping);
-       if (!ret)
-               ret = err;
-       current->flags &= ~PF_SYNCWRITE;
+       return do_fsync(fd, 0);
+}
 
-out_putf:
-       fput(file);
-out:
-       return ret;
+asmlinkage long sys_fdatasync(unsigned int fd)
+{
+       return do_fsync(fd, 1);
 }
 
 /*
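
Note on the three hunks above: sys_fsync() and sys_fdatasync() were
near-duplicates differing only in the third argument passed to the
->fsync() method, so they are folded into a common do_fsync() helper.
Reassembled from the hunks (a reading aid, not a verbatim listing),
the consolidated path looks like this:

	static long do_fsync(unsigned int fd, int datasync)
	{
		struct file * file;
		struct address_space *mapping;
		int ret, err;

		ret = -EBADF;
		file = fget(fd);
		if (!file)
			goto out;

		ret = -EINVAL;
		if (!file->f_op || !file->f_op->fsync) {
			/* Why?  We can still call filemap_fdatawrite */
			goto out_putf;
		}

		mapping = file->f_mapping;

		current->flags |= PF_SYNCWRITE;
		ret = filemap_fdatawrite(mapping);

		/*
		 * We need to protect against concurrent writers,
		 * which could cause livelocks in fsync_buffers_list
		 */
		down(&mapping->host->i_sem);
		err = file->f_op->fsync(file, file->f_dentry, datasync);
		if (!ret)
			ret = err;
		up(&mapping->host->i_sem);
		err = filemap_fdatawait(mapping);
		if (!ret)
			ret = err;
		current->flags &= ~PF_SYNCWRITE;

	out_putf:
		fput(file);
	out:
		return ret;
	}

	asmlinkage long sys_fsync(unsigned int fd)
	{
		return do_fsync(fd, 0);	/* sync data and metadata */
	}

	asmlinkage long sys_fdatasync(unsigned int fd)
	{
		return do_fsync(fd, 1);	/* datasync: skip inessential metadata */
	}

Behaviour is unchanged apart from file->f_mapping now being read only
after the ->fsync method check; the duplication is simply gone.
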
@@ -522,13 +497,13 @@ static void free_more_memory(void)
        struct zone **zones;
        pg_data_t *pgdat;
 
-       wakeup_bdflush(1024);
+       wakeup_pdflush(1024);
        yield();
 
        for_each_pgdat(pgdat) {
                zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
                if (*zones)
-                       try_to_free_pages(zones, GFP_NOFS, 0);
+                       try_to_free_pages(zones, GFP_NOFS);
        }
 }
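
Two independent API shifts meet in free_more_memory(): the old bdflush
kick becomes wakeup_pdflush() (bdflush is gone in this series; pdflush
is the pool of writeback threads, and the argument is the number of
pages to write back), and try_to_free_pages() has lost its trailing
order argument. As a sketch with descriptive comments, the function
now reads:

	/*
	 * A buffer_head allocation failed: kick writeback, then try
	 * direct reclaim against each node's GFP_NOFS zonelist.
	 */
	static void free_more_memory(void)
	{
		struct zone **zones;
		pg_data_t *pgdat;

		wakeup_pdflush(1024);	/* start writeback of ~1024 pages */
		yield();		/* let I/O make some progress */

		for_each_pgdat(pgdat) {
			zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
			if (*zones)
				try_to_free_pages(zones, GFP_NOFS);
		}
	}
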
 
@@ -538,8 +513,8 @@ static void free_more_memory(void)
  */
 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 {
-       static DEFINE_SPINLOCK(page_uptodate_lock);
        unsigned long flags;
+       struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
        int page_uptodate = 1;
@@ -561,7 +536,9 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
         * two buffer heads end IO at almost the same time and both
         * decide that the page is now completely done.
         */
-       spin_lock_irqsave(&page_uptodate_lock, flags);
+       first = page_buffers(page);
+       local_irq_save(flags);
+       bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
        clear_buffer_async_read(bh);
        unlock_buffer(bh);
        tmp = bh;
@@ -574,7 +551,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
                }
                tmp = tmp->b_this_page;
        } while (tmp != bh);
-       spin_unlock_irqrestore(&page_uptodate_lock, flags);
+       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+       local_irq_restore(flags);
 
        /*
         * If none of the buffers had errors and they are all
@@ -586,7 +564,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
        return;
 
 still_busy:
-       spin_unlock_irqrestore(&page_uptodate_lock, flags);
+       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+       local_irq_restore(flags);
        return;
 }
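
The interesting change here is the lock itself. page_uptodate_lock was
a single static spinlock shared by every page in the system, so
unrelated I/O completions contended on one lock. It is replaced by a
per-page bit spinlock, BH_Uptodate_Lock, carried in the b_state word of
the page's first buffer_head. Because bit_spin_lock() does not itself
disable interrupts, and this runs in the I/O completion path, interrupts
are masked by hand. The pattern, extracted:

	struct buffer_head *first = page_buffers(page);	/* lock lives here */
	unsigned long flags;

	local_irq_save(flags);		/* end_io can run in irq context */
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	/* ... walk the circular b_this_page ring, test the async bits ... */

	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

end_buffer_async_write() below receives the identical conversion.
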
 
@@ -597,8 +576,8 @@ still_busy:
 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
        char b[BDEVNAME_SIZE];
-       static DEFINE_SPINLOCK(page_uptodate_lock);
        unsigned long flags;
+       struct buffer_head *first;
        struct buffer_head *tmp;
        struct page *page;
 
@@ -619,7 +598,10 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
                SetPageError(page);
        }
 
-       spin_lock_irqsave(&page_uptodate_lock, flags);
+       first = page_buffers(page);
+       local_irq_save(flags);
+       bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+
        clear_buffer_async_write(bh);
        unlock_buffer(bh);
        tmp = bh->b_this_page;
@@ -630,12 +612,14 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
                }
                tmp = tmp->b_this_page;
        }
-       spin_unlock_irqrestore(&page_uptodate_lock, flags);
+       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+       local_irq_restore(flags);
        end_page_writeback(page);
        return;
 
 still_busy:
-       spin_unlock_irqrestore(&page_uptodate_lock, flags);
+       bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+       local_irq_restore(flags);
        return;
 }
 
@@ -774,15 +758,14 @@ repeat:
 /**
  * sync_mapping_buffers - write out and wait upon a mapping's "associated"
  *                        buffers
- * @buffer_mapping - the mapping which backs the buffers' data
- * @mapping - the mapping which wants those buffers written
+ * @mapping: the mapping which wants those buffers written
  *
  * Starts I/O against the buffers at mapping->private_list, and waits upon
  * that I/O.
  *
- * Basically, this is a convenience function for fsync().  @buffer_mapping is
- * the blockdev which "owns" the buffers and @mapping is a file or directory
- * which needs those buffers to be written for a successful fsync().
+ * Basically, this is a convenience function for fsync().
+ * @mapping is a file or directory which needs those buffers to be written for
+ * a successful fsync().
  */
 int sync_mapping_buffers(struct address_space *mapping)
 {
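
The kerneldoc above had drifted from the code: sync_mapping_buffers()
takes a single @mapping argument, and the blockdev mapping that owns
the buffers is located internally (via the mapping's associated
blockdev mapping in kernels of this vintage), which is why the
documented @buffer_mapping parameter never existed. A typical caller,
sketched with a hypothetical filesystem name for illustration:

	/* Hypothetical example: an fsync method built on sync_mapping_buffers() */
	static int examplefs_fsync(struct file *file, struct dentry *dentry,
				   int datasync)
	{
		struct inode *inode = dentry->d_inode;

		/* write out and wait on the buffers at i_mapping->private_list */
		return sync_mapping_buffers(inode->i_mapping);
	}
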
@@ -1211,7 +1194,7 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
        return 1;
 }
 
-struct buffer_head *
+static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
        /* Size must be multiple of hard sectorsize */
@@ -1263,6 +1246,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
 
 /**
  * mark_buffer_dirty - mark a buffer_head as needing writeout
+ * @bh: the buffer_head to mark dirty
  *
  * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
  * backing page dirty, then tag the page as dirty in its address_space's radix
@@ -1501,6 +1485,7 @@ EXPORT_SYMBOL(__breadahead);
 
 /**
  *  __bread() - reads a specified block and returns the bh
+ *  @bdev: the block_device to read from
  *  @block: number of block
  *  @size: size (in bytes) to read
  * 
@@ -1808,7 +1793,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        } while (bh != head);
 
        do {
-               get_bh(bh);
                if (!buffer_mapped(bh))
                        continue;
                /*
@@ -1837,7 +1821,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
         */
        BUG_ON(PageWriteback(page));
        set_page_writeback(page);
-       unlock_page(page);
 
        do {
                struct buffer_head *next = bh->b_this_page;
@@ -1845,9 +1828,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                        submit_bh(WRITE, bh);
                        nr_underway++;
                }
-               put_bh(bh);
                bh = next;
        } while (bh != head);
+       unlock_page(page);
 
        err = 0;
 done:
@@ -1886,7 +1869,6 @@ recover:
        bh = head;
        /* Recovery: lock and submit the mapped buffers */
        do {
-               get_bh(bh);
                if (buffer_mapped(bh) && buffer_dirty(bh)) {
                        lock_buffer(bh);
                        mark_buffer_async_write(bh);
@@ -1909,7 +1891,6 @@ recover:
                        submit_bh(WRITE, bh);
                        nr_underway++;
                }
-               put_bh(bh);
                bh = next;
        } while (bh != head);
        goto done;
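
A reading of the __block_write_full_page() hunks above: the per-buffer
get_bh()/put_bh() pinning was needed because the page was unlocked
before the buffers were submitted, so an I/O could complete and the
buffer ring be torn down while the loop still walked it. With
unlock_page() moved below the submission loop, the page lock itself
keeps the buffers attached until the last submit_bh(), and the extra
references go too. The submission tail now reads:

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(WRITE, bh);	/* queue for write-out */
			nr_underway++;
		}
		bh = next;
	} while (bh != head);
	unlock_page(page);	/* held until every buffer was submitted */
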
@@ -1952,9 +1933,8 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
                if (!buffer_mapped(bh)) {
                        err = get_block(inode, block, bh, 1);
                        if (err)
-                               goto out;
+                               break;
                        if (buffer_new(bh)) {
-                               clear_buffer_new(bh);
                                unmap_underlying_metadata(bh->b_bdev,
                                                        bh->b_blocknr);
                                if (PageUptodate(page)) {
@@ -1994,10 +1974,17 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
        while(wait_bh > wait) {
                wait_on_buffer(*--wait_bh);
                if (!buffer_uptodate(*wait_bh))
-                       return -EIO;
+                       err = -EIO;
        }
-       return 0;
-out:
+       if (!err) {
+               bh = head;
+               do {
+                       if (buffer_new(bh))
+                               clear_buffer_new(bh);
+               } while ((bh = bh->b_this_page) != head);
+               return 0;
+       }
+       /* Error case: */
        /*
         * Zero out any newly allocated blocks to avoid exposing stale
         * data.  If BH_New is set, we know that the block was newly
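
This is the substantive fix in __block_prepare_write(): BH_New used to
be cleared the moment a block was allocated, so if a later get_block()
failed, the error path could no longer tell which buffers were freshly
allocated and needed zeroing, and stale disk contents could leak out.
Likewise, a read failure in the wait loop now records -EIO and falls
through instead of returning past the cleanup. The new bit survives
until the whole page has been prepared and read; only on full success
is it swept clear:

	if (!err) {
		bh = head;
		do {
			if (buffer_new(bh))
				clear_buffer_new(bh);	/* all good: drop the hint */
		} while ((bh = bh->b_this_page) != head);
		return 0;
	}
	/*
	 * Error case: the recovery code below finds the still-set
	 * BH_New bits and zeroes those blocks before failing.
	 */
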
@@ -2078,8 +2065,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
        int nr, i;
        int fully_mapped = 1;
 
-       if (!PageLocked(page))
-               PAGE_BUG(page);
+       BUG_ON(!PageLocked(page));
        blocksize = 1 << inode->i_blkbits;
        if (!page_has_buffers(page))
                create_empty_buffers(page, blocksize, 0);
@@ -2096,9 +2082,12 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
                        continue;
 
                if (!buffer_mapped(bh)) {
+                       int err = 0;
+
                        fully_mapped = 0;
                        if (iblock < lblock) {
-                               if (get_block(inode, iblock, bh, 0))
+                               err = get_block(inode, iblock, bh, 0);
+                               if (err)
                                        SetPageError(page);
                        }
                        if (!buffer_mapped(bh)) {
@@ -2106,7 +2095,8 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
                                memset(kaddr + i * blocksize, 0, blocksize);
                                flush_dcache_page(page);
                                kunmap_atomic(kaddr, KM_USER0);
-                               set_buffer_uptodate(bh);
+                               if (!err)
+                                       set_buffer_uptodate(bh);
                                continue;
                        }
                        /*
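
The distinction being drawn in block_read_full_page(): an unmapped
buffer below EOF is either a hole (get_block() returned 0 without
mapping it) or the result of a failed get_block(). Both are
zero-filled so the page contents are defined, but only the genuine
hole may be marked uptodate; setting the bit on the error path would
silently mask the failure. Merged, the new code reads:

	if (!buffer_mapped(bh)) {
		void *kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + i * blocksize, 0, blocksize);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		if (!err)
			set_buffer_uptodate(bh);	/* a real hole */
		continue;
	}
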
@@ -3115,7 +3105,7 @@ void __init buffer_init(void)
 
        bh_cachep = kmem_cache_create("buffer_head",
                        sizeof(struct buffer_head), 0,
-                       SLAB_PANIC, init_buffer_head, NULL);
+                       SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
 
        /*
         * Limit the bh occupancy to 10% of ZONE_NORMAL
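
SLAB_RECLAIM_ACCOUNT does not change how buffer_heads are allocated; it
tells the slab allocator to account this cache's pages as reclaimable,
which feeds the VM's estimate of how much memory could be freed under
pressure (buffer_heads are shed when their pages are reclaimed). The
resulting call, with the flags annotated:

	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
			/* pages are reclaimable; panic if the cache
			 * cannot be created at boot */
			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC,
			init_buffer_head, NULL);
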