Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik...
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 1c0a431..7b2c8f4 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -2,7 +2,7 @@
  * aops.c - NTFS kernel address space operations and page cache handling.
  *         Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2005 Anton Altaparmakov
+ * Copyright (c) 2001-2006 Anton Altaparmakov
  * Copyright (c) 2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
@@ -22,6 +22,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/swap.h>
@@ -200,8 +201,8 @@ static int ntfs_read_block(struct page *page)
        /* $MFT/$DATA must have its complete runlist in memory at all times. */
        BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));
 
-       blocksize_bits = VFS_I(ni)->i_blkbits;
-       blocksize = 1 << blocksize_bits;
+       blocksize = vol->sb->s_blocksize;
+       blocksize_bits = vol->sb->s_blocksize_bits;
 
        if (!page_has_buffers(page)) {
                create_empty_buffers(page, blocksize, 0);
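
This hunk reads the block size from the superblock fields instead of
shifting the VFS inode's i_blkbits.  Both are set up from the device
block size at mount time, so the values agree; a minimal sketch of the
invariant being relied on (generic code, not part of the patch):

#include <linux/fs.h>

static inline unsigned long sb_blocksize(const struct super_block *sb)
{
        /* s_blocksize is maintained as 1 << s_blocksize_bits. */
        return sb->s_blocksize; /* same as 1UL << sb->s_blocksize_bits */
}
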
@@ -253,7 +254,7 @@ static int ntfs_read_block(struct page *page)
                bh->b_bdev = vol->sb->s_bdev;
                /* Is the block within the allowed limits? */
                if (iblock < lblock) {
-                       BOOL is_retry = FALSE;
+                       bool is_retry = false;
 
                        /* Convert iblock into corresponding vcn and offset. */
                        vcn = (VCN)iblock << blocksize_bits >>
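
BOOL, TRUE and FALSE were NTFS's private boolean type; this and the
following hunks switch to the kernel-wide bool from <linux/types.h>.
A before/after sketch (the old typedef is reproduced from memory and
may not match the NTFS header exactly):

/* Before: NTFS's own boolean, roughly: */
typedef enum { FALSE = 0, TRUE = 1 } BOOL;
BOOL was_retry = FALSE;

/* After: the generic kernel type. */
#include <linux/types.h>        /* bool, true, false */
bool is_retry = false;
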
@@ -291,7 +292,7 @@ lock_retry_remap:
                                goto handle_hole;
                        /* If first try and runlist unmapped, map and retry. */
                        if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
-                               is_retry = TRUE;
+                               is_retry = true;
                                /*
                                 * Attempt to map runlist, dropping lock for
                                 * the duration.
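
The is_retry flag drives NTFS's map-on-demand idiom: if the runlist
turns out not to be mapped when a vcn is looked up, drop the lock, map
the runlist and retry exactly once.  A condensed sketch of the control
flow in this function (locals shown, ni and vcn from context; not a
drop-in replacement):

        runlist_element *rl;
        LCN lcn;
        bool is_retry = false;

lock_retry_remap:
        down_read(&ni->runlist.lock);
        rl = ni->runlist.rl;
        lcn = rl ? ntfs_rl_vcn_to_lcn(rl, vcn) : LCN_RL_NOT_MAPPED;
        up_read(&ni->runlist.lock);
        if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
                is_retry = true;
                /* ntfs_map_runlist() returns 0 on success. */
                if (!ntfs_map_runlist(ni, vcn))
                        goto lock_retry_remap;
        }
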
@@ -557,7 +558,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
        unsigned long flags;
        unsigned int blocksize, vcn_ofs;
        int err;
-       BOOL need_end_writeback;
+       bool need_end_writeback;
        unsigned char blocksize_bits;
 
        vi = page->mapping->host;
@@ -569,10 +570,8 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 
        BUG_ON(!NInoNonResident(ni));
        BUG_ON(NInoMstProtected(ni));
-
-       blocksize_bits = vi->i_blkbits;
-       blocksize = 1 << blocksize_bits;
-
+       blocksize = vol->sb->s_blocksize;
+       blocksize_bits = vol->sb->s_blocksize_bits;
        if (!page_has_buffers(page)) {
                BUG_ON(!PageUptodate(page));
                create_empty_buffers(page, blocksize,
@@ -627,7 +626,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
        rl = NULL;
        err = 0;
        do {
-               BOOL is_retry = FALSE;
+               bool is_retry = false;
 
                if (unlikely(block >= dblock)) {
                        /*
@@ -769,7 +768,7 @@ lock_retry_remap:
                }
                /* If first try and runlist unmapped, map and retry. */
                if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
-                       is_retry = TRUE;
+                       is_retry = true;
                        /*
                         * Attempt to map runlist, dropping lock for
                         * the duration.
@@ -875,12 +874,12 @@ lock_retry_remap:
        set_page_writeback(page);       /* Keeps try_to_free_buffers() away. */
 
        /* Submit the prepared buffers for i/o. */
-       need_end_writeback = TRUE;
+       need_end_writeback = true;
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        submit_bh(WRITE, bh);
-                       need_end_writeback = FALSE;
+                       need_end_writeback = false;
                }
                bh = next;
        } while (bh != head);
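
need_end_writeback records whether the loop queued any i/o at all: the
buffers of a page form a circular list linked through b_this_page, and
if nothing was submitted the function has to complete writeback itself.
The same walk in isolation (2.6.16-era submit_bh(), which took the rw
flag as its first argument):

        struct buffer_head *bh, *head;
        bool submitted = false;

        bh = head = page_buffers(page);         /* page from context */
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        submit_bh(WRITE, bh);   /* queue async write */
                        submitted = true;
                }
                bh = next;
        } while (bh != head);
        if (!submitted)
                end_page_writeback(page);       /* nothing was queued */
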
@@ -933,7 +932,7 @@ static int ntfs_write_mst_block(struct page *page,
        runlist_element *rl;
        int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
        unsigned bh_size, rec_size_bits;
-       BOOL sync, is_mft, page_is_dirty, rec_is_dirty;
+       bool sync, is_mft, page_is_dirty, rec_is_dirty;
        unsigned char bh_size_bits;
 
        ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
@@ -949,8 +948,8 @@ static int ntfs_write_mst_block(struct page *page,
         */
        BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
                        (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
-       bh_size_bits = vi->i_blkbits;
-       bh_size = 1 << bh_size_bits;
+       bh_size = vol->sb->s_blocksize;
+       bh_size_bits = vol->sb->s_blocksize_bits;
        max_bhs = PAGE_CACHE_SIZE / bh_size;
        BUG_ON(!max_bhs);
        BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
@@ -976,10 +975,10 @@ static int ntfs_write_mst_block(struct page *page,
 
        rl = NULL;
        err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
-       page_is_dirty = rec_is_dirty = FALSE;
+       page_is_dirty = rec_is_dirty = false;
        rec_start_bh = NULL;
        do {
-               BOOL is_retry = FALSE;
+               bool is_retry = false;
 
                if (likely(block < rec_block)) {
                        if (unlikely(block >= dblock)) {
@@ -1010,10 +1009,10 @@ static int ntfs_write_mst_block(struct page *page,
                        }
                        if (!buffer_dirty(bh)) {
                                /* Clean records are not written out. */
-                               rec_is_dirty = FALSE;
+                               rec_is_dirty = false;
                                continue;
                        }
-                       rec_is_dirty = TRUE;
+                       rec_is_dirty = true;
                        rec_start_bh = bh;
                }
                /* Need to map the buffer if it is not mapped already. */
@@ -1054,7 +1053,7 @@ lock_retry_remap:
                                 */
                                if (!is_mft && !is_retry &&
                                                lcn == LCN_RL_NOT_MAPPED) {
-                                       is_retry = TRUE;
+                                       is_retry = true;
                                        /*
                                         * Attempt to map runlist, dropping
                                         * lock for the duration.
@@ -1064,7 +1063,7 @@ lock_retry_remap:
                                        if (likely(!err2))
                                                goto lock_retry_remap;
                                        if (err2 == -ENOMEM)
-                                               page_is_dirty = TRUE;
+                                               page_is_dirty = true;
                                        lcn = err2;
                                } else {
                                        err2 = -EIO;
@@ -1146,7 +1145,7 @@ lock_retry_remap:
                                 * means we need to redirty the page before
                                 * returning.
                                 */
-                               page_is_dirty = TRUE;
+                               page_is_dirty = true;
                                /*
                                 * Remove the buffers in this mft record from
                                 * the list of buffers to write.
@@ -1279,18 +1278,18 @@ unm_done:
                
                tni = locked_nis[nr_locked_nis];
                /* Get the base inode. */
-               down(&tni->extent_lock);
+               mutex_lock(&tni->extent_lock);
                if (tni->nr_extents >= 0)
                        base_tni = tni;
                else {
                        base_tni = tni->ext.base_ntfs_ino;
                        BUG_ON(!base_tni);
                }
-               up(&tni->extent_lock);
+               mutex_unlock(&tni->extent_lock);
                ntfs_debug("Unlocking %s inode 0x%lx.",
                                tni == base_tni ? "base" : "extent",
                                tni->mft_no);
-               up(&tni->mrec_lock);
+               mutex_unlock(&tni->mrec_lock);
                atomic_dec(&tni->count);
                iput(VFS_I(base_tni));
        }
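
The down()/up() pairs become mutex_lock()/mutex_unlock(); this only
compiles because the extent_lock and mrec_lock members of ntfs_inode
were switched from struct semaphore to the then-new struct mutex in
the same series.  A generic before/after sketch with standalone locks
(not the actual ntfs_inode fields):

#include <asm/semaphore.h>      /* 2.6.16-era location */
#include <linux/mutex.h>

static struct semaphore old_lock;       /* before: semaphore as mutex */
static DEFINE_MUTEX(new_lock);          /* after: dedicated mutex type */

static void lock_demo(void)
{
        init_MUTEX(&old_lock);          /* binary semaphore, count 1 */
        down(&old_lock);
        /* ... critical section ... */
        up(&old_lock);

        mutex_lock(&new_lock);
        /* ... critical section ... */
        mutex_unlock(&new_lock);
}
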
@@ -1531,7 +1530,6 @@ err_out:
                                "error %i.", err);
                SetPageError(page);
                NVolSetErrors(ni->vol);
-               make_bad_inode(vi);
        }
        unlock_page(page);
        if (ctx)
@@ -1546,20 +1544,23 @@ err_out:
 /**
  * ntfs_aops - general address space operations for inodes and attributes
  */
-struct address_space_operations ntfs_aops = {
+const struct address_space_operations ntfs_aops = {
        .readpage       = ntfs_readpage,        /* Fill page with data. */
        .sync_page      = block_sync_page,      /* Currently, just unplugs the
                                                   disk request queue. */
 #ifdef NTFS_RW
        .writepage      = ntfs_writepage,       /* Write dirty page to disk. */
 #endif /* NTFS_RW */
+       .migratepage    = buffer_migrate_page,  /* Move a page cache page from
+                                                  one physical page to
+                                                  another. */
 };
 
 /**
 * ntfs_mst_aops - general address space operations for mst protected inodes
  *                and attributes
  */
-struct address_space_operations ntfs_mst_aops = {
+const struct address_space_operations ntfs_mst_aops = {
        .readpage       = ntfs_readpage,        /* Fill page with data. */
        .sync_page      = block_sync_page,      /* Currently, just unplugs the
                                                   disk request queue. */
@@ -1569,6 +1570,9 @@ struct address_space_operations ntfs_mst_aops = {
                                                   without touching the buffers
                                                   belonging to the page. */
 #endif /* NTFS_RW */
+       .migratepage    = buffer_migrate_page,  /* Move a page cache page from
+                                                  one physical page to
+                                                  another. */
 };
 
 #ifdef NTFS_RW
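
Both operation tables gain const, so they can be placed in read-only
data, and a .migratepage method.  buffer_migrate_page() is the stock
helper for block-device-backed mappings: on memory migration it moves
the page's buffer_heads to the new page rather than failing or forcing
a writeout.  A skeletal table in the same style (my_* methods are
placeholders; the two-argument hook matches the 2.6.16-era prototype):

#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>  /* block_sync_page, buffer_migrate_page */

static int my_readpage(struct file *file, struct page *page);
static int my_writepage(struct page *page, struct writeback_control *wbc);

const struct address_space_operations my_aops = {
        .readpage       = my_readpage,          /* fill page with data */
        .writepage      = my_writepage,         /* write dirty page */
        .sync_page      = block_sync_page,      /* unplug request queue */
        .migratepage    = buffer_migrate_page,  /* buffer-aware page move */
};
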
@@ -1596,7 +1600,7 @@ void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs) {
 
        BUG_ON(!PageUptodate(page));
        end = ofs + ni->itype.index.block_size;
-       bh_size = 1 << VFS_I(ni)->i_blkbits;
+       bh_size = VFS_I(ni)->i_sb->s_blocksize;
        spin_lock(&mapping->private_lock);
        if (unlikely(!page_has_buffers(page))) {
                spin_unlock(&mapping->private_lock);