X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=fs%2Fxfs%2Fxfs_inode.c;h=44dfac5212856567195068f9570e65f8857709c5;hb=dfd7a3db3898e299bdc25f0c77081a8632b3a73c;hp=86c1bf0bba9ea19bc0f38cdb98adfd76769ef053;hpb=075395d228641646159dae3dd170fa3fc6ff477a;p=powerpc.git

diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 86c1bf0bba..44dfac5212 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -334,10 +334,9 @@ xfs_itobp(
 #if !defined(__KERNEL__)
 	ni = 0;
 #elif defined(DEBUG)
-	ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 :
-		(BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog);
+	ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog;
 #else	/* usual case */
-	ni = (imap_flags & XFS_IMAP_BULKSTAT) ? 0 : 1;
+	ni = 1;
 #endif
 
 	for (i = 0; i < ni; i++) {
@@ -348,11 +347,15 @@ xfs_itobp(
 					(i << mp->m_sb.sb_inodelog));
 		di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
 			    XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
-		if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP,
-				 XFS_RANDOM_ITOBP_INOTOBP))) {
+		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
+					XFS_ERRTAG_ITOBP_INOTOBP,
+					XFS_RANDOM_ITOBP_INOTOBP))) {
+			if (imap_flags & XFS_IMAP_BULKSTAT) {
+				xfs_trans_brelse(tp, bp);
+				return XFS_ERROR(EINVAL);
+			}
 #ifdef DEBUG
-			if (!(imap_flags & XFS_IMAP_BULKSTAT))
-				cmn_err(CE_ALERT,
+			cmn_err(CE_ALERT,
 					"Device %s - bad inode magic/vsn "
 					"daddr %lld #%d (magic=%x)",
 					XFS_BUFTARG_NAME(mp->m_ddev_targp),
@@ -851,7 +854,8 @@ xfs_iread(
 	xfs_trans_t	*tp,
 	xfs_ino_t	ino,
 	xfs_inode_t	**ipp,
-	xfs_daddr_t	bno)
+	xfs_daddr_t	bno,
+	uint		imap_flags)
 {
 	xfs_buf_t	*bp;
 	xfs_dinode_t	*dip;
@@ -863,6 +867,7 @@ xfs_iread(
 	ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
 	ip->i_ino = ino;
 	ip->i_mount = mp;
+	spin_lock_init(&ip->i_flags_lock);
 
 	/*
 	 * Get pointer's to the on-disk inode and the buffer containing it.
@@ -871,7 +876,7 @@ xfs_iread(
 	 * return NULL as well.  Set i_blkno to 0 so that xfs_itobp() will
 	 * know that this is a new incore inode.
	 */
-	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, 0);
+	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags);
 	if (error) {
 		kmem_zone_free(xfs_inode_zone, ip);
 		return error;
 	}
@@ -1110,7 +1115,7 @@ xfs_ialloc(
 	 * to prevent others from looking at until we're done.
 	 */
 	error = xfs_trans_iget(tp->t_mountp, tp, ino,
-			IGET_CREATE, XFS_ILOCK_EXCL, &ip);
+			XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
 	if (error != 0) {
 		return error;
 	}
@@ -2188,7 +2193,7 @@ xfs_ifree_cluster(
 			/* Inode not in memory or we found it already,
 			 * nothing to do
 			 */
-			if (!ip || (ip->i_flags & XFS_ISTALE)) {
+			if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
 				read_unlock(&ih->ih_lock);
 				continue;
 			}
@@ -2210,8 +2215,7 @@ xfs_ifree_cluster(
 
 			if (ip == free_ip) {
 				if (xfs_iflock_nowait(ip)) {
-					ip->i_flags |= XFS_ISTALE;
-
+					xfs_iflags_set(ip, XFS_ISTALE);
 					if (xfs_inode_clean(ip)) {
 						xfs_ifunlock(ip);
 					} else {
@@ -2224,7 +2228,7 @@ xfs_ifree_cluster(
 
 			if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 				if (xfs_iflock_nowait(ip)) {
-					ip->i_flags |= XFS_ISTALE;
+					xfs_iflags_set(ip, XFS_ISTALE);
 
 					if (xfs_inode_clean(ip)) {
 						xfs_ifunlock(ip);
@@ -2254,7 +2258,7 @@ xfs_ifree_cluster(
 				AIL_LOCK(mp,s);
 				iip->ili_flush_lsn = iip->ili_item.li_lsn;
 				AIL_UNLOCK(mp, s);
-				iip->ili_inode->i_flags |= XFS_ISTALE;
+				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
 				pre_flushed++;
 			}
 			lip = lip->li_bio_list;
@@ -2737,31 +2741,38 @@ xfs_iunpin(
 {
 	ASSERT(atomic_read(&ip->i_pincount) > 0);
 
-	if (atomic_dec_and_test(&ip->i_pincount)) {
+	if (atomic_dec_and_lock(&ip->i_pincount, &ip->i_flags_lock)) {
+
 		/*
-		 * If the inode is currently being reclaimed, the
-		 * linux inode _and_ the xfs vnode may have been
-		 * freed so we cannot reference either of them safely.
-		 * Hence we should not try to do anything to them
-		 * if the xfs inode is currently in the reclaim
-		 * path.
+		 * If the inode is currently being reclaimed, the link between
+		 * the bhv_vnode and the xfs_inode will be broken after the
+		 * XFS_IRECLAIM* flag is set. Hence, if these flags are not
+		 * set, then we can move forward and mark the linux inode dirty
+		 * knowing that it is still valid as it won't freed until after
+		 * the bhv_vnode<->xfs_inode link is broken in xfs_reclaim. The
+		 * i_flags_lock is used to synchronise the setting of the
+		 * XFS_IRECLAIM* flags and the breaking of the link, and so we
+		 * can execute atomically w.r.t to reclaim by holding this lock
+		 * here.
 		 *
-		 * However, we still need to issue the unpin wakeup
-		 * call as the inode reclaim may be blocked waiting for
-		 * the inode to become unpinned.
+		 * However, we still need to issue the unpin wakeup call as the
+		 * inode reclaim may be blocked waiting for the inode to become
+		 * unpinned.
 		 */
-		if (!(ip->i_flags & (XFS_IRECLAIM|XFS_IRECLAIMABLE))) {
+
+		if (!__xfs_iflags_test(ip, XFS_IRECLAIM|XFS_IRECLAIMABLE)) {
 			bhv_vnode_t	*vp = XFS_ITOV_NULL(ip);
+			struct inode *inode = NULL;
 
-			/* make sync come back and flush this inode */
-			if (vp) {
-				struct inode *inode = vn_to_inode(vp);
+			BUG_ON(vp == NULL);
+			inode = vn_to_inode(vp);
+			BUG_ON(inode->i_state & I_CLEAR);
 
-				if (!(inode->i_state &
-						(I_NEW|I_FREEING|I_CLEAR)))
-					mark_inode_dirty_sync(inode);
-			}
+			/* make sync come back and flush this inode */
+			if (!(inode->i_state & (I_NEW|I_FREEING)))
+				mark_inode_dirty_sync(inode);
 		}
+		spin_unlock(&ip->i_flags_lock);
 		wake_up(&ip->i_ipin_wait);
 	}
 }
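
Note on the xfs_itobp()/xfs_iread() hunks above: previously the magic/version validation loop was skipped entirely for XFS_IMAP_BULKSTAT lookups (ni = 0) and the DEBUG alert was suppressed; now every inode is validated, and a failed check on a bulkstat lookup releases the buffer and returns EINVAL to the caller. The fragment below is purely illustrative and not part of this diff; it assumes a bulkstat-style caller that passes XFS_IMAP_BULKSTAT down through the new xfs_iread() argument and skips whichever inode comes back EINVAL:

		/*
		 * Hypothetical bulkstat-style caller, inside its per-inode
		 * loop: a corrupt on-disk inode now surfaces as EINVAL from
		 * xfs_iread()/xfs_itobp(), so the scan can skip just that
		 * inode instead of aborting or printing an alert.
		 */
		error = xfs_iread(mp, tp, ino, &ip, bno, XFS_IMAP_BULKSTAT);
		if (error == EINVAL)
			continue;	/* bad magic/version: skip this inode */
		if (error)
			return error;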
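
Note on the i_flags/i_flags_lock hunks above: atomic_dec_and_lock() decrements i_pincount and returns true with ip->i_flags_lock already held only when the count reaches zero, which is why the new xfs_iunpin() can use the unlocked __xfs_iflags_test() and must drop the lock with spin_unlock() before issuing the wakeup. The xfs_iflags_set()/xfs_iflags_test() helpers the other hunks switch to are not shown in this diff (they belong in fs/xfs/xfs_inode.h); a minimal sketch, assuming each one simply wraps the i_flags bit operations in the new spinlock:

/* Locked/unlocked i_flags helpers -- a sketch, not the committed code. */
static inline void
__xfs_iflags_set(xfs_inode_t *ip, unsigned short flags)
{
	/* caller must already hold ip->i_flags_lock */
	ip->i_flags |= flags;
}

static inline void
xfs_iflags_set(xfs_inode_t *ip, unsigned short flags)
{
	spin_lock(&ip->i_flags_lock);
	__xfs_iflags_set(ip, flags);
	spin_unlock(&ip->i_flags_lock);
}

static inline int
__xfs_iflags_test(xfs_inode_t *ip, unsigned short flags)
{
	/* unlocked variant, e.g. under atomic_dec_and_lock() in xfs_iunpin() */
	return (ip->i_flags & flags);
}

static inline int
xfs_iflags_test(xfs_inode_t *ip, unsigned short flags)
{
	int ret;

	spin_lock(&ip->i_flags_lock);
	ret = __xfs_iflags_test(ip, flags);
	spin_unlock(&ip->i_flags_lock);
	return ret;
}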