rename thread_info to stack
[powerpc.git] / fs / dcache.c
index a1ff91e..0e73aa0 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/fsnotify.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-#include <linux/smp_lock.h>
 #include <linux/hash.h>
 #include <linux/cache.h>
 #include <linux/module.h>
@@ -43,7 +42,7 @@ static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
 
 EXPORT_SYMBOL(dcache_lock);
 
-static kmem_cache_t *dentry_cache __read_mostly;
+static struct kmem_cache *dentry_cache __read_mostly;
 
 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
 
@@ -68,15 +67,19 @@ struct dentry_stat_t dentry_stat = {
        .age_limit = 45,
 };
 
-static void d_callback(struct rcu_head *head)
+static void __d_free(struct dentry *dentry)
 {
-       struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
-
        if (dname_external(dentry))
                kfree(dentry->d_name.name);
        kmem_cache_free(dentry_cache, dentry); 
 }
 
+static void d_callback(struct rcu_head *head)
+{
+       struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
+       __d_free(dentry);
+}
+
 /*
  * no dcache_lock, please.  The caller must decrement dentry_stat.nr_dentry
  * inside dcache_lock.
@@ -85,7 +88,11 @@ static void d_free(struct dentry *dentry)
 {
        if (dentry->d_op && dentry->d_op->d_release)
                dentry->d_op->d_release(dentry);
-       call_rcu(&dentry->d_u.d_rcu, d_callback);
+       /* if dentry was never inserted into hash, immediate free is OK */
+       if (dentry->d_hash.pprev == NULL)
+               __d_free(dentry);
+       else
+               call_rcu(&dentry->d_u.d_rcu, d_callback);
 }
 
 /*
@@ -113,6 +120,28 @@ static void dentry_iput(struct dentry * dentry)
        }
 }
 
+/**
+ * d_kill - kill dentry and return parent
+ * @dentry: dentry to kill
+ *
+ * Called with dcache_lock and d_lock, releases both.  The dentry must
+ * already be unhashed and removed from the LRU.
+ *
+ * If this is the root of the dentry tree, return NULL.
+ */
+static struct dentry *d_kill(struct dentry *dentry)
+{
+       struct dentry *parent;
+
+       list_del(&dentry->d_u.d_child);
+       dentry_stat.nr_dentry--;        /* For d_free, below */
+       /* Drops the locks; at this point nobody can reach this dentry. */
+       dentry_iput(dentry);
+       parent = dentry->d_parent;
+       d_free(dentry);
+       return dentry == parent ? NULL : parent;
+}
+
 /* 
  * This is dput
  *
@@ -181,28 +210,17 @@ repeat:
 
 unhash_it:
        __d_drop(dentry);
-
-kill_it: {
-               struct dentry *parent;
-
-               /* If dentry was on d_lru list
-                * delete it from there
-                */
-               if (!list_empty(&dentry->d_lru)) {
-                       list_del(&dentry->d_lru);
-                       dentry_stat.nr_unused--;
-               }
-               list_del(&dentry->d_u.d_child);
-               dentry_stat.nr_dentry--;        /* For d_free, below */
-               /*drops the locks, at that point nobody can reach this dentry */
-               dentry_iput(dentry);
-               parent = dentry->d_parent;
-               d_free(dentry);
-               if (dentry == parent)
-                       return;
-               dentry = parent;
-               goto repeat;
+kill_it:
+       /* If dentry was on the d_lru list,
+        * delete it from there.
+        */
+       if (!list_empty(&dentry->d_lru)) {
+               list_del(&dentry->d_lru);
+               dentry_stat.nr_unused--;
        }
+       dentry = d_kill(dentry);
+       if (dentry)
+               goto repeat;
 }
 
 /**
@@ -363,22 +381,40 @@ restart:
  * Throw away a dentry - free the inode, dput the parent.  This requires that
  * the LRU list has already been removed.
  *
+ * If prune_parents is true, try to prune ancestors as well.
+ *
  * Called with dcache_lock, drops it and then regains.
  * Called with dentry->d_lock held, drops it.
  */
-static void prune_one_dentry(struct dentry * dentry)
+static void prune_one_dentry(struct dentry * dentry, int prune_parents)
 {
-       struct dentry * parent;
-
        __d_drop(dentry);
-       list_del(&dentry->d_u.d_child);
-       dentry_stat.nr_dentry--;        /* For d_free, below */
-       dentry_iput(dentry);
-       parent = dentry->d_parent;
-       d_free(dentry);
-       if (parent != dentry)
-               dput(parent);
+       dentry = d_kill(dentry);
+       if (!prune_parents) {
+               dput(dentry);
+               spin_lock(&dcache_lock);
+               return;
+       }
+
+       /*
+        * Prune ancestors.  Locking is simpler than in dput(),
+        * because dcache_lock needs to be taken anyway.
+        */
        spin_lock(&dcache_lock);
+       while (dentry) {
+               if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock))
+                       return;
+
+               if (dentry->d_op && dentry->d_op->d_delete)
+                       dentry->d_op->d_delete(dentry);
+               if (!list_empty(&dentry->d_lru)) {
+                       list_del(&dentry->d_lru);
+                       dentry_stat.nr_unused--;
+               }
+               __d_drop(dentry);
+               dentry = d_kill(dentry);
+               spin_lock(&dcache_lock);
+       }
 }
 
 /**
@@ -386,6 +422,7 @@ static void prune_one_dentry(struct dentry * dentry)
  * @count: number of entries to try and free
  * @sb: if given, ignore dentries for other superblocks
  *         which are being unmounted.
+ * @prune_parents: if true, try to prune ancestors as well in one go
  *
  * Shrink the dcache. This is done when we need
  * more memory, or simply when we need to unmount
@@ -396,7 +433,7 @@ static void prune_one_dentry(struct dentry * dentry)
  * all the dentries are in use.
  */
  
-static void prune_dcache(int count, struct super_block *sb)
+static void prune_dcache(int count, struct super_block *sb, int prune_parents)
 {
        spin_lock(&dcache_lock);
        for (; count ; count--) {
@@ -456,7 +493,7 @@ static void prune_dcache(int count, struct super_block *sb)
                 * without taking the s_umount lock (I already hold it).
                 */
                if (sb && dentry->d_sb == sb) {
-                       prune_one_dentry(dentry);
+                       prune_one_dentry(dentry, prune_parents);
                        continue;
                }
                /*
@@ -471,18 +508,19 @@ static void prune_dcache(int count, struct super_block *sb)
                s_umount = &dentry->d_sb->s_umount;
                if (down_read_trylock(s_umount)) {
                        if (dentry->d_sb->s_root != NULL) {
-                               prune_one_dentry(dentry);
+                               prune_one_dentry(dentry, prune_parents);
                                up_read(s_umount);
                                continue;
                        }
                        up_read(s_umount);
                }
                spin_unlock(&dentry->d_lock);
-               /* Cannot remove the first dentry, and it isn't appropriate
-                * to move it to the head of the list, so give up, and try
-                * later
+               /*
+                * Insert dentry at the head of the list as inserting at the
+                * tail leads to a cycle.
                 */
-               break;
+               list_add(&dentry->d_lru, &dentry_unused);
+               dentry_stat.nr_unused++;
        }
        spin_unlock(&dcache_lock);
 }
@@ -541,7 +579,7 @@ repeat:
                        spin_unlock(&dentry->d_lock);
                        continue;
                }
-               prune_one_dentry(dentry);
+               prune_one_dentry(dentry, 1);
                cond_resched_lock(&dcache_lock);
                goto repeat;
        }
@@ -556,6 +594,7 @@ repeat:
 static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 {
        struct dentry *parent;
+       unsigned detached = 0;
 
        BUG_ON(!IS_ROOT(dentry));
 
@@ -620,7 +659,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                                atomic_dec(&parent->d_count);
 
                        list_del(&dentry->d_u.d_child);
-                       dentry_stat.nr_dentry--;        /* For d_free, below */
+                       detached++;
 
                        inode = dentry->d_inode;
                        if (inode) {
@@ -638,7 +677,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                         * otherwise we ascend to the parent and move to the
                         * next sibling if there is one */
                        if (!parent)
-                               return;
+                               goto out;
 
                        dentry = parent;
 
@@ -647,6 +686,11 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
                dentry = list_entry(dentry->d_subdirs.next,
                                    struct dentry, d_u.d_child);
        }
+out:
+       /* Several dentries were freed above; correct nr_dentry under dcache_lock. */
+       spin_lock(&dcache_lock);
+       dentry_stat.nr_dentry -= detached;
+       spin_unlock(&dcache_lock);
 }
 
 /*
@@ -814,7 +858,7 @@ void shrink_dcache_parent(struct dentry * parent)
        int found;
 
        while ((found = select_parent(parent)) != 0)
-               prune_dcache(found, parent->d_sb);
+               prune_dcache(found, parent->d_sb, 1);
 }
 
 /*
@@ -834,7 +878,7 @@ static int shrink_dcache_memory(int nr, gfp_t gfp_mask)
        if (nr) {
                if (!(gfp_mask & __GFP_FS))
                        return -1;
-               prune_dcache(nr, NULL);
+               prune_dcache(nr, NULL, 1);
        }
        return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
@@ -1808,6 +1852,16 @@ char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
        struct vfsmount *rootmnt;
        struct dentry *root;
 
+       /*
+        * We have various synthetic filesystems that never get mounted.  On
+        * these filesystems dentries are never used for lookup purposes, and
+        * thus don't need to be hashed.  They also don't need a name until a
+        * user wants to identify the object in /proc/pid/fd/.  The little hack
+        * below allows us to generate a name for these objects on demand:
+        */
+       if (dentry->d_op && dentry->d_op->d_dname)
+               return dentry->d_op->d_dname(dentry, buf, buflen);
+
        read_lock(&current->fs->lock);
        rootmnt = mntget(current->fs->rootmnt);
        root = dget(current->fs->root);
@@ -1820,6 +1874,27 @@ char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
        return res;
 }
 
+/*
+ * Helper function for dentry_operations.d_dname() members
+ */
+char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
+                       const char *fmt, ...)
+{
+       va_list args;
+       char temp[64];
+       int sz;
+
+       va_start(args, fmt);
+       sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
+       va_end(args);
+
+       if (sz > sizeof(temp) || sz > buflen)
+               return ERR_PTR(-ENAMETOOLONG);
+
+       buffer += buflen - sz;
+       return memcpy(buffer, temp, sz);
+}
+
 /*
  * NOTE! The user-level library version returns a
  * character pointer. The kernel system call just
@@ -2037,12 +2112,8 @@ static void __init dcache_init(unsigned long mempages)
         * but it is probably not worth it because of the cache nature
         * of the dcache. 
         */
-       dentry_cache = kmem_cache_create("dentry_cache",
-                                        sizeof(struct dentry),
-                                        0,
-                                        (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
-                                        SLAB_MEM_SPREAD),
-                                        NULL, NULL);
+       dentry_cache = KMEM_CACHE(dentry,
+               SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
        
        set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);
 
@@ -2065,10 +2136,10 @@ static void __init dcache_init(unsigned long mempages)
 }
 
 /* SLAB cache for __getname() consumers */
-kmem_cache_t *names_cachep __read_mostly;
+struct kmem_cache *names_cachep __read_mostly;
 
 /* SLAB cache for file structures */
-kmem_cache_t *filp_cachep __read_mostly;
+struct kmem_cache *filp_cachep __read_mostly;
 
 EXPORT_SYMBOL(d_genocide);