diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 1f8ecff..44dfac5 100644
@@ -854,7 +854,8 @@ xfs_iread(
        xfs_trans_t     *tp,
        xfs_ino_t       ino,
        xfs_inode_t     **ipp,
-       xfs_daddr_t     bno)
+       xfs_daddr_t     bno,
+       uint            imap_flags)
 {
        xfs_buf_t       *bp;
        xfs_dinode_t    *dip;
@@ -866,6 +867,7 @@ xfs_iread(
        ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
        ip->i_ino = ino;
        ip->i_mount = mp;
+       spin_lock_init(&ip->i_flags_lock);
 
        /*
         * Get pointers to the on-disk inode and the buffer containing it.
@@ -874,7 +876,7 @@ xfs_iread(
         * return NULL as well.  Set i_blkno to 0 so that xfs_itobp() will
         * know that this is a new incore inode.
         */
-       error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, 0);
+       error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags);
        if (error) {
                kmem_zone_free(xfs_inode_zone, ip);
                return error;
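
The new imap_flags argument lets xfs_iread() callers pass inode-mapping hints straight through to xfs_itobp() instead of the hardcoded 0. A minimal sketch of how a caller such as xfs_iget_core() might use it; the XFS_IMAP_BULKSTAT flag and the bulkstat condition are illustrative assumptions, not taken from this patch:

	/*
	 * Sketch only: forward a bulkstat-style lookup hint so that
	 * xfs_itobp() can relax its cluster-map validation for inode
	 * numbers that did not come from a trusted handle.
	 */
	error = xfs_iread(mp, tp, ino, &ip, bno,
			  bulkstat ? XFS_IMAP_BULKSTAT : 0);
	if (error)
		return error;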
@@ -1113,7 +1115,7 @@ xfs_ialloc(
         * to prevent others from looking at until we're done.
         */
        error = xfs_trans_iget(tp->t_mountp, tp, ino,
-                       IGET_CREATE, XFS_ILOCK_EXCL, &ip);
+                               XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
        if (error != 0) {
                return error;
        }
@@ -2191,7 +2193,7 @@ xfs_ifree_cluster(
                        /* Inode not in memory or we found it already,
                         * nothing to do
                         */
-                       if (!ip || (ip->i_flags & XFS_ISTALE)) {
+                       if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
                                read_unlock(&ih->ih_lock);
                                continue;
                        }
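
Here and in the hunks below, open-coded tests and updates of ip->i_flags are replaced with xfs_iflags_*() accessors. A minimal sketch of what those helpers plausibly look like, assuming they live in fs/xfs/xfs_inode.h and serialise on the i_flags_lock initialised in xfs_iread() above (the locked/unlocked __-variant split is inferred from the __xfs_iflags_test() call later in this patch):

	/* Lock-free variants: caller must already hold ip->i_flags_lock. */
	static inline void
	__xfs_iflags_set(xfs_inode_t *ip, unsigned short flags)
	{
		ip->i_flags |= flags;
	}

	static inline int
	__xfs_iflags_test(xfs_inode_t *ip, unsigned short flags)
	{
		return (ip->i_flags & flags);
	}

	/* Locked variants: take i_flags_lock around the update/test. */
	static inline void
	xfs_iflags_set(xfs_inode_t *ip, unsigned short flags)
	{
		spin_lock(&ip->i_flags_lock);
		__xfs_iflags_set(ip, flags);
		spin_unlock(&ip->i_flags_lock);
	}

	static inline int
	xfs_iflags_test(xfs_inode_t *ip, unsigned short flags)
	{
		int ret;

		spin_lock(&ip->i_flags_lock);
		ret = __xfs_iflags_test(ip, flags);
		spin_unlock(&ip->i_flags_lock);
		return ret;
	}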
@@ -2213,8 +2215,7 @@ xfs_ifree_cluster(
 
                        if (ip == free_ip) {
                                if (xfs_iflock_nowait(ip)) {
-                                       ip->i_flags |= XFS_ISTALE;
-
+                                       xfs_iflags_set(ip, XFS_ISTALE);
                                        if (xfs_inode_clean(ip)) {
                                                xfs_ifunlock(ip);
                                        } else {
@@ -2227,7 +2228,7 @@ xfs_ifree_cluster(
 
                        if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
                                if (xfs_iflock_nowait(ip)) {
-                                       ip->i_flags |= XFS_ISTALE;
+                                       xfs_iflags_set(ip, XFS_ISTALE);
 
                                        if (xfs_inode_clean(ip)) {
                                                xfs_ifunlock(ip);
@@ -2257,7 +2258,7 @@ xfs_ifree_cluster(
                                AIL_LOCK(mp,s);
                                iip->ili_flush_lsn = iip->ili_item.li_lsn;
                                AIL_UNLOCK(mp, s);
-                               iip->ili_inode->i_flags |= XFS_ISTALE;
+                               xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
                                pre_flushed++;
                        }
                        lip = lip->li_bio_list;
@@ -2740,31 +2741,38 @@ xfs_iunpin(
 {
        ASSERT(atomic_read(&ip->i_pincount) > 0);
 
-       if (atomic_dec_and_test(&ip->i_pincount)) {
+       if (atomic_dec_and_lock(&ip->i_pincount, &ip->i_flags_lock)) {
+
                /*
-                * If the inode is currently being reclaimed, the
-                * linux inode _and_ the xfs vnode may have been
-                * freed so we cannot reference either of them safely.
-                * Hence we should not try to do anything to them
-                * if the xfs inode is currently in the reclaim
-                * path.
+                * If the inode is currently being reclaimed, the link between
+                * the bhv_vnode and the xfs_inode will be broken after the
+                * XFS_IRECLAIM* flag is set. Hence, if these flags are not
+                * set, then we can move forward and mark the linux inode dirty
+                * knowing that it is still valid, as it won't be freed until
+                * after the bhv_vnode<->xfs_inode link is broken in
+                * xfs_reclaim. The i_flags_lock is used to synchronise the
+                * setting of the XFS_IRECLAIM* flags and the breaking of the
+                * link, so we can execute atomically w.r.t. reclaim by
+                * holding this lock here.
                 *
-                * However, we still need to issue the unpin wakeup
-                * call as the inode reclaim may be blocked waiting for
-                * the inode to become unpinned.
+                * However, we still need to issue the unpin wakeup call as the
+                * inode reclaim may be blocked waiting for the inode to become
+                * unpinned.
                 */
-               if (!(ip->i_flags & (XFS_IRECLAIM|XFS_IRECLAIMABLE))) {
+
+               if (!__xfs_iflags_test(ip, XFS_IRECLAIM|XFS_IRECLAIMABLE)) {
                        bhv_vnode_t     *vp = XFS_ITOV_NULL(ip);
+                       struct inode *inode = NULL;
 
-                       /* make sync come back and flush this inode */
-                       if (vp) {
-                               struct inode    *inode = vn_to_inode(vp);
+                       BUG_ON(vp == NULL);
+                       inode = vn_to_inode(vp);
+                       BUG_ON(inode->i_state & I_CLEAR);
 
-                               if (!(inode->i_state &
-                                               (I_NEW|I_FREEING|I_CLEAR)))
-                                       mark_inode_dirty_sync(inode);
-                       }
+                       /* make sync come back and flush this inode */
+                       if (!(inode->i_state & (I_NEW|I_FREEING)))
+                               mark_inode_dirty_sync(inode);
                }
+               spin_unlock(&ip->i_flags_lock);
                wake_up(&ip->i_ipin_wait);
        }
 }
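
The atomic_dec_and_lock() conversion is only safe if reclaim sets the XFS_IRECLAIM* flags and breaks the bhv_vnode<->xfs_inode link under the same i_flags_lock. Roughly, the reclaim-side ordering this relies on looks like the sketch below (a hypothetical condensation; the i_vnode field name is illustrative, not from this patch):

	/*
	 * Sketch of the reclaim side: the flag is set and the vnode link
	 * torn down atomically under i_flags_lock.  xfs_iunpin() holds the
	 * same lock (via atomic_dec_and_lock()) while testing the flags,
	 * so it either sees XFS_IRECLAIMABLE and leaves the linux inode
	 * alone, or sees the link still intact and can safely call
	 * mark_inode_dirty_sync() on a live inode.
	 */
	spin_lock(&ip->i_flags_lock);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	ip->i_vnode = NULL;	/* break the bhv_vnode<->xfs_inode link */
	spin_unlock(&ip->i_flags_lock);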