/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */
#include <linux/config.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/prefetch.h>
#include <linux/locks.h>
/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 *
 * Famous last words.
 */
/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */

/* #define INODE_PARANOIA 1 */
/* #define INODE_DEBUG 1 */
/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define I_HASHBITS	i_hash_shift
#define I_HASHMASK	i_hash_mask
static unsigned int i_hash_mask;
static unsigned int i_hash_shift;
/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, i_count > 0, i_nlink > 0
 *  "dirty"  - as "in_use" but also dirty
 *  "unused" - valid inode, i_count = 0
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */
static LIST_HEAD(inode_in_use);
static LIST_HEAD(inode_unused);
static struct list_head *inode_hashtable;
static LIST_HEAD(anon_hash_chain); /* for inodes with NULL i_sb */
/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
static spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;
/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static kmem_cache_t * inode_cachep;
static struct inode *alloc_inode(struct super_block *sb)
{
	static struct address_space_operations empty_aops;
	static struct inode_operations empty_iops;
	static struct file_operations empty_fops;
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else {
		inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL);
		if (inode)
			memset(&inode->u, 0, sizeof(inode->u));
	}

	if (inode) {
		struct address_space * const mapping = &inode->i_data;

		inode->i_sb = sb;
		inode->i_dev = sb->s_dev;
		inode->i_blkbits = sb->s_blocksize_bits;
		inode->i_flags = 0;
		atomic_set(&inode->i_count, 1);
		inode->i_sock = 0;
		inode->i_op = &empty_iops;
		inode->i_fop = &empty_fops;
		inode->i_nlink = 1;
		atomic_set(&inode->i_writecount, 0);
		inode->i_size = 0;
		inode->i_blocks = 0;
		inode->i_generation = 0;
		memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
		inode->i_pipe = NULL;
		inode->i_bdev = NULL;
		inode->i_cdev = NULL;

		mapping->a_ops = &empty_aops;
		mapping->host = inode;
		mapping->gfp_mask = GFP_HIGHUSER;
		inode->i_mapping = mapping;
	}
	return inode;
}
static void destroy_inode(struct inode *inode)
{
	if (inode_has_buffers(inode))
		BUG();
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		kmem_cache_free(inode_cachep, inode);
}
/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	init_waitqueue_head(&inode->i_wait);
	INIT_LIST_HEAD(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_data.clean_pages);
	INIT_LIST_HEAD(&inode->i_data.dirty_pages);
	INIT_LIST_HEAD(&inode->i_data.locked_pages);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_dirty_buffers);
	INIT_LIST_HEAD(&inode->i_dirty_data_buffers);
	INIT_LIST_HEAD(&inode->i_devices);
	sema_init(&inode->i_sem, 1);
	sema_init(&inode->i_zombie, 1);
	spin_lock_init(&inode->i_data.i_shared_lock);
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
	struct inode * inode = (struct inode *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
		inode_init_once(inode);
}
/*
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but
 * move it onto the dirty list only if it is hashed.
 * If it was not hashed, it will never be added to
 * the dirty list even if it is later hashed, as it
 * will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_
 * you start marking them dirty..
 */
/**
 *	__mark_inode_dirty - internal function
 *	@inode: inode to mark
 *	@flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 *	Mark an inode as dirty. Callers should use mark_inode_dirty or
 *	mark_inode_dirty_sync.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block * sb = inode->i_sb;

	if (!sb)
		return;

	/* Don't do this for I_DIRTY_PAGES - that doesn't actually dirty the inode itself */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op && sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		inode->i_state |= flags;
		/* Only add valid (ie hashed) inodes to the dirty list */
		if (!(inode->i_state & I_LOCK) && !list_empty(&inode->i_hash)) {
			list_del(&inode->i_list);
			list_add(&inode->i_list, &sb->s_dirty);
		}
	}
	spin_unlock(&inode_lock);
}
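
/*
 * A minimal usage sketch (not part of the original file): a filesystem
 * that updates inode metadata marks the inode dirty so the writeback
 * machinery above picks it up.  my_fs_touch() is a hypothetical helper;
 * mark_inode_dirty() is the <linux/fs.h> wrapper that expands to
 * __mark_inode_dirty(inode, I_DIRTY).
 */
#if 0
static void my_fs_touch(struct inode *inode)
{
	inode->i_mtime = CURRENT_TIME;	/* metadata change... */
	mark_inode_dirty(inode);	/* ...queues the inode on sb->s_dirty */
}
#endif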
static void __wait_on_inode(struct inode * inode)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&inode->i_wait, &wait);
repeat:
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (inode->i_state & I_LOCK) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&inode->i_wait, &wait);
	current->state = TASK_RUNNING;
}
static inline void wait_on_inode(struct inode *inode)
{
	if (inode->i_state & I_LOCK)
		__wait_on_inode(inode);
}
static inline void write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		inode->i_sb->s_op->write_inode(inode, sync);
}
static inline void __iget(struct inode * inode)
{
	if (atomic_read(&inode->i_count)) {
		atomic_inc(&inode->i_count);
		return;
	}
	atomic_inc(&inode->i_count);
	if (!(inode->i_state & (I_DIRTY|I_LOCK))) {
		list_del(&inode->i_list);
		list_add(&inode->i_list, &inode_in_use);
	}
	inodes_stat.nr_unused--;
}
static inline void __sync_one(struct inode *inode, int sync)
{
	unsigned dirty;

	list_del(&inode->i_list);
	list_add(&inode->i_list, &inode->i_sb->s_locked_inodes);

	if (inode->i_state & I_LOCK)
		BUG();

	/* Set I_LOCK, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_LOCK;
	inode->i_state &= ~I_DIRTY;
	spin_unlock(&inode_lock);

	filemap_fdatasync(inode->i_mapping);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC))
		write_inode(inode, sync);

	filemap_fdatawait(inode->i_mapping);

	spin_lock(&inode_lock);
	inode->i_state &= ~I_LOCK;
	if (!(inode->i_state & I_FREEING)) {
		struct list_head *to;
		if (inode->i_state & I_DIRTY)
			to = &inode->i_sb->s_dirty;
		else if (atomic_read(&inode->i_count))
			to = &inode_in_use;
		else
			to = &inode_unused;
		list_del(&inode->i_list);
		list_add(&inode->i_list, to);
	}
	wake_up(&inode->i_wait);
}
static inline void sync_one(struct inode *inode, int sync)
{
	while (inode->i_state & I_LOCK) {
		__iget(inode);
		spin_unlock(&inode_lock);
		__wait_on_inode(inode);
		iput(inode);
		spin_lock(&inode_lock);
	}

	__sync_one(inode, sync);
}
static inline void sync_list(struct list_head *head)
{
	struct list_head * tmp;

	while ((tmp = head->prev) != head)
		__sync_one(list_entry(tmp, struct inode, i_list), 0);
}
static inline void wait_on_locked(struct list_head *head)
{
	struct list_head * tmp;
	while ((tmp = head->prev) != head) {
		struct inode *inode = list_entry(tmp, struct inode, i_list);
		__iget(inode);
		spin_unlock(&inode_lock);
		__wait_on_inode(inode);
		iput(inode);
		spin_lock(&inode_lock);
	}
}
static inline int try_to_sync_unused_list(struct list_head *head, int nr_inodes)
{
	struct list_head *tmp = head;
	struct inode *inode;

	while (nr_inodes && (tmp = tmp->prev) != head) {
		inode = list_entry(tmp, struct inode, i_list);

		if (!atomic_read(&inode->i_count)) {
			__sync_one(inode, 0);
			nr_inodes--;

			/*
			 * __sync_one moved the inode to another list,
			 * so we have to start looking from the list head.
			 */
			tmp = head;
		}
	}

	return nr_inodes;
}
void sync_inodes_sb(struct super_block *sb)
{
	spin_lock(&inode_lock);
	while (!list_empty(&sb->s_dirty) || !list_empty(&sb->s_locked_inodes)) {
		sync_list(&sb->s_dirty);
		wait_on_locked(&sb->s_locked_inodes);
	}
	spin_unlock(&inode_lock);
}
/*
 * Note:
 * We don't need to grab a reference to the superblock here. If it has a
 * non-empty ->s_dirty it hadn't been killed yet and kill_super() won't
 * proceed past sync_inodes_sb() until both ->s_dirty and
 * ->s_locked_inodes are empty. Since __sync_one() regains inode_lock
 * before it finally moves the inode off the superblock lists we are OK.
 */
void sync_unlocked_inodes(void)
{
	struct super_block * sb;

	spin_lock(&inode_lock);
	spin_lock(&sb_lock);
	sb = sb_entry(super_blocks.next);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
		if (!list_empty(&sb->s_dirty)) {
			spin_unlock(&sb_lock);
			sync_list(&sb->s_dirty);
			spin_lock(&sb_lock);
		}
	}
	spin_unlock(&sb_lock);
	spin_unlock(&inode_lock);
}
/*
 * Find a superblock with inodes that need to be synced
 */

static struct super_block *get_super_to_sync(void)
{
	struct list_head *p;
restart:
	spin_lock(&inode_lock);
	spin_lock(&sb_lock);
	list_for_each(p, &super_blocks) {
		struct super_block *s = list_entry(p, struct super_block, s_list);
		if (list_empty(&s->s_dirty) && list_empty(&s->s_locked_inodes))
			continue;
		s->s_count++;
		spin_unlock(&sb_lock);
		spin_unlock(&inode_lock);
		down_read(&s->s_umount);
		if (!s->s_root) {
			drop_super(s);
			goto restart;
		}
		return s;
	}
	spin_unlock(&sb_lock);
	spin_unlock(&inode_lock);
	return NULL;
}
/**
 *	sync_inodes
 *	@dev: device to sync the inodes from.
 *
 *	sync_inodes goes through each super block's dirty list,
 *	writes the inodes out, and puts them back on the normal list.
 */

void sync_inodes(kdev_t dev)
{
	struct super_block * s;

	/*
	 * Search the super_blocks list for the device(s) to sync.
	 */
	if (dev) {
		if ((s = get_super(dev)) != NULL) {
			sync_inodes_sb(s);
			drop_super(s);
		}
	} else {
		while ((s = get_super_to_sync()) != NULL) {
			sync_inodes_sb(s);
			drop_super(s);
		}
	}
}
static void try_to_sync_unused_inodes(void * arg)
{
	struct super_block * sb;
	int nr_inodes = inodes_stat.nr_unused;

	spin_lock(&inode_lock);
	spin_lock(&sb_lock);
	sb = sb_entry(super_blocks.next);
	for (; nr_inodes && sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
		if (list_empty(&sb->s_dirty))
			continue;
		spin_unlock(&sb_lock);
		nr_inodes = try_to_sync_unused_list(&sb->s_dirty, nr_inodes);
		spin_lock(&sb_lock);
	}
	spin_unlock(&sb_lock);
	spin_unlock(&inode_lock);
}
static struct tq_struct unused_inodes_flush_task;
/**
 *	write_inode_now - write an inode to disk
 *	@inode: inode to write to disk
 *	@sync: whether the write should be synchronous or not
 *
 *	This function commits an inode to disk immediately if it is
 *	dirty. This is primarily needed by knfsd.
 */

void write_inode_now(struct inode *inode, int sync)
{
	struct super_block * sb = inode->i_sb;

	if (sb) {
		spin_lock(&inode_lock);
		while (inode->i_state & I_DIRTY)
			sync_one(inode, sync);
		spin_unlock(&inode_lock);
		if (sync)
			wait_on_inode(inode);
	} else
		printk(KERN_ERR "write_inode_now: no super block\n");
}
/**
 *	generic_osync_inode - flush all dirty data for a given inode to disk
 *	@inode: inode to write
 *	@what: which components to flush (OSYNC_DATA, OSYNC_METADATA and/or OSYNC_INODE)
 *
 *	This can be called by file_write functions for files which have the
 *	O_SYNC flag set, to flush dirty writes to disk.
 */

int generic_osync_inode(struct inode *inode, int what)
{
	int err = 0, err2 = 0, need_write_inode_now = 0;

	/*
	 * WARNING
	 *
	 * Currently, the filesystem write path does not pass the
	 * filp down to the low-level write functions.  Therefore it
	 * is impossible for (say) __block_commit_write to know if
	 * the operation is O_SYNC or not.
	 *
	 * Ideally, O_SYNC writes would have the filesystem call
	 * ll_rw_block as it went to kick-start the writes, and we
	 * could call osync_inode_buffers() here to wait only for
	 * those IOs which have already been submitted to the device
	 * driver layer.  As it stands, if we did this we'd not write
	 * anything to disk since our writes have not been queued by
	 * this point: they are still on the dirty LRU.
	 *
	 * So, currently we will call fsync_inode_buffers() instead,
	 * to flush _all_ dirty buffers for this inode to disk on
	 * every O_SYNC write, not just the synchronous I/Os.  --sct
	 */

	if (what & OSYNC_METADATA)
		err = fsync_inode_buffers(inode);
	if (what & OSYNC_DATA)
		err2 = fsync_inode_data_buffers(inode);
	if (!err)
		err = err2;

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now)
		write_inode_now(inode, 1);
	else
		wait_on_inode(inode);

	return err;
}
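
/*
 * Caller-side sketch: in 2.4 the generic write path ends an O_SYNC
 * write roughly like this (error handling trimmed), flushing both the
 * data buffers and the inode metadata before returning to user space.
 */
#if 0
	if (status >= 0 && (file->f_flags & O_SYNC))
		status = generic_osync_inode(inode, OSYNC_METADATA | OSYNC_DATA);
#endif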
/**
 *	clear_inode - clear an inode
 *	@inode: inode to clear
 *
 *	This is called by the filesystem to tell us
 *	that the inode is no longer useful. We just
 *	terminate it with extreme prejudice.
 */

void clear_inode(struct inode *inode)
{
	invalidate_inode_buffers(inode);

	if (inode->i_data.nrpages)
		BUG();
	if (!(inode->i_state & I_FREEING))
		BUG();
	if (inode->i_state & I_CLEAR)
		BUG();
	wait_on_inode(inode);
	DQUOT_DROP(inode);
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->clear_inode)
		inode->i_sb->s_op->clear_inode(inode);
	if (inode->i_bdev)
		bd_forget(inode);
	else if (inode->i_cdev) {
		cdput(inode->i_cdev);
		inode->i_cdev = NULL;
	}
	inode->i_state = I_CLEAR;
}
/*
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head * head)
{
	struct list_head * inode_entry;
	struct inode * inode;

	while ((inode_entry = head->next) != head) {
		list_del(inode_entry);

		inode = list_entry(inode_entry, struct inode, i_list);
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
		destroy_inode(inode);
		inodes_stat.nr_inodes--;
	}
}
/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
{
	struct list_head *next;
	int busy = 0, count = 0;

	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_list);
		if (inode->i_sb != sb)
			continue;
		invalidate_inode_buffers(inode);
		if (!atomic_read(&inode->i_count)) {
			list_del_init(&inode->i_hash);
			list_del(&inode->i_list);
			list_add(&inode->i_list, dispose);
			inode->i_state |= I_FREEING;
			count++;
			continue;
		}
		busy = 1;
	}
	/* only unused inodes may be cached with i_count zero */
	inodes_stat.nr_unused -= count;
	return busy;
}
/*
 * This is a two-stage process. First we collect all
 * offending inodes onto the throw-away list, and in
 * the second stage we actually dispose of them. This
 * is because we don't want to sleep while messing
 * with the global lists..
 */

/**
 *	invalidate_inodes - discard the inodes on a device
 *	@sb: superblock
 *
 *	Discard all of the inodes for a given superblock. If the discard
 *	fails because there are busy inodes then a non-zero value is
 *	returned. If the discard is successful all the inodes have been
 *	discarded.
 */
int invalidate_inodes(struct super_block * sb)
{
	int busy;
	LIST_HEAD(throw_away);

	spin_lock(&inode_lock);
	busy = invalidate_list(&inode_in_use, sb, &throw_away);
	busy |= invalidate_list(&inode_unused, sb, &throw_away);
	busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
	busy |= invalidate_list(&sb->s_locked_inodes, sb, &throw_away);
	spin_unlock(&inode_lock);

	dispose_list(&throw_away);

	return busy;
}
int invalidate_device(kdev_t dev, int do_sync)
{
	struct super_block *sb;
	int res;

	if (do_sync)
		fsync_dev(dev);

	res = 0;
	sb = get_super(dev);
	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read semaphore so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb);
		drop_super(sb);
	}
	invalidate_buffers(dev);
	return res;
}
/*
 * This is called with the inode lock held. It searches
 * the unused list for freeable inodes, which are moved to a
 * temporary list and then freed by dispose_list.
 *
 * We don't expect to have to call this very often.
 *
 * N.B. The spinlock is released during the call to
 *      dispose_list.
 */
#define CAN_UNUSE(inode) \
	((((inode)->i_state | (inode)->i_data.nrpages) == 0)  && \
	 !inode_has_buffers(inode))
#define INODE(entry)	(list_entry(entry, struct inode, i_list))
void prune_icache(int goal)
{
	LIST_HEAD(list);
	struct list_head *entry, *freeable = &list;
	int count;
	struct inode * inode;

	spin_lock(&inode_lock);

	count = 0;
	entry = inode_unused.prev;
	while (entry != &inode_unused) {
		struct list_head *tmp = entry;

		entry = entry->prev;
		inode = INODE(tmp);
		if (inode->i_state & (I_FREEING|I_CLEAR|I_LOCK))
			continue;
		if (!CAN_UNUSE(inode))
			continue;
		if (atomic_read(&inode->i_count))
			continue;
		list_del(tmp);
		list_del(&inode->i_hash);
		INIT_LIST_HEAD(&inode->i_hash);
		list_add(tmp, freeable);
		inode->i_state |= I_FREEING;
		count++;
		if (!--goal)
			break;
	}
	inodes_stat.nr_unused -= count;
	spin_unlock(&inode_lock);

	dispose_list(freeable);

	/*
	 * If we didn't free enough clean inodes, schedule
	 * a sync of the dirty inodes; we cannot do it
	 * from here or we're either synchronously dogslow
	 * or we deadlock with oom.
	 */
	if (goal)
		schedule_task(&unused_inodes_flush_task);
}
int shrink_icache_memory(int priority, int gfp_mask)
{
	int count = 0;

	/*
	 * Nasty deadlock avoidance..
	 *
	 * We may hold various FS locks, and we don't
	 * want to recurse into the FS that called us
	 * in clear_inode() and friends..
	 */
	if (!(gfp_mask & __GFP_FS))
		return 0;

	count = inodes_stat.nr_unused / priority;

	prune_icache(count);
	return kmem_cache_shrink(inode_cachep);
}
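
/*
 * Caller-side sketch (assumption: this mirrors the 2.4 VM in
 * mm/vmscan.c): under memory pressure the VM asks each of the name
 * caches to shrink, passing the reclaim priority and the allocation's
 * gfp_mask so the deadlock check above can veto recursion into the
 * filesystems.
 */
#if 0
	shrink_dcache_memory(priority, gfp_mask);
	shrink_icache_memory(priority, gfp_mask);
#endif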
/*
 * Called with the inode lock held.
 * NOTE: we are not increasing the inode-refcount, you must call __iget()
 * by hand after calling find_inode now! This simplifies iunique and won't
 * add any additional branch in the common code.
 */
static struct inode * find_inode(struct super_block * sb, unsigned long ino, struct list_head *head, find_inode_t find_actor, void *opaque)
{
	struct list_head *tmp;
	struct inode * inode;

	tmp = head;
	for (;;) {
		tmp = tmp->next;
		inode = NULL;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_hash);
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		if (find_actor && !find_actor(inode, ino, opaque))
			continue;
		break;
	}
	return inode;
}
/**
 *	new_inode - obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for given superblock.
 */

struct inode * new_inode(struct super_block *sb)
{
	static unsigned long last_ino;
	struct inode * inode;

	spin_lock_prefetch(&inode_lock);

	inode = alloc_inode(sb);
	if (inode) {
		spin_lock(&inode_lock);
		inodes_stat.nr_inodes++;
		list_add(&inode->i_list, &inode_in_use);
		inode->i_ino = ++last_ino;
		inode->i_state = 0;
		spin_unlock(&inode_lock);
	}
	return inode;
}
/*
 * This is called without the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *	-- rmk@arm.uk.linux.org
 */
static struct inode * get_new_inode(struct super_block *sb, unsigned long ino, struct list_head *head, find_inode_t find_actor, void *opaque)
{
	struct inode * inode;

	inode = alloc_inode(sb);
	if (inode) {
		struct inode * old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, ino, head, find_actor, opaque);
		if (!old) {
			inodes_stat.nr_inodes++;
			list_add(&inode->i_list, &inode_in_use);
			list_add(&inode->i_hash, head);
			inode->i_ino = ino;
			inode->i_state = I_LOCK;
			spin_unlock(&inode_lock);

			/* reiserfs specific hack right here. We don't
			** want this to last, and are looking for VFS changes
			** that will allow us to get rid of it.
			** -- hans@reiser.to
			*/
			if (sb->s_op->read_inode2) {
				sb->s_op->read_inode2(inode, opaque);
			} else {
				sb->s_op->read_inode(inode);
			}

			/*
			 * This is special! We do not need the spinlock
			 * when clearing I_LOCK, because we're guaranteed
			 * that nobody else tries to do anything about the
			 * state of the inode when it is locked, as we
			 * just created it (so there can be no old holders
			 * that haven't tested I_LOCK).
			 */
			inode->i_state &= ~I_LOCK;
			wake_up(&inode->i_wait);

			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
static inline unsigned long hash(struct super_block *sb, unsigned long i_ino)
{
	unsigned long tmp = i_ino + ((unsigned long) sb / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> I_HASHBITS);
	return tmp & I_HASHMASK;
}

/* Yeah, I know about quadratic hash. Maybe, later. */
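
/*
 * Worked example of the hash above (symbolic, no particular values
 * assumed): for inode number ino on super_block pointer sb,
 *
 *	tmp    = ino + (unsigned long) sb / L1_CACHE_BYTES;
 *	tmp   += tmp >> I_HASHBITS;
 *	bucket = tmp & I_HASHMASK;
 *
 * Dividing the superblock pointer by the cache-line size strips its
 * always-zero low bits before mixing it in, so different filesystems
 * land in different runs of buckets; folding the high bits back in
 * mixes the table-sized upper part of the sum into the final index.
 */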
/**
 *	iunique - get a unique inode number
 *	@sb: superblock
 *	@max_reserved: highest reserved inode number
 *
 *	Obtain an inode number that is unique on the system for a given
 *	superblock. This is used by file systems that have no natural
 *	permanent inode numbering system. An inode number is returned that
 *	is higher than the reserved limit but unique.
 *
 *	BUGS:
 *	With a large number of inodes live on the file system this function
 *	currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	static ino_t counter = 0;
	struct inode *inode;
	struct list_head * head;
	ino_t res;

	spin_lock(&inode_lock);
retry:
	if (counter > max_reserved) {
		head = inode_hashtable + hash(sb, counter);
		inode = find_inode(sb, res = counter++, head, NULL, NULL);
		if (!inode) {
			spin_unlock(&inode_lock);
			return res;
		}
	} else {
		counter = max_reserved + 1;
	}
	goto retry;
}
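
/*
 * Usage sketch (hypothetical): a synthetic filesystem with no on-disk
 * inode numbers can label a fresh inode with a system-unique number
 * above its reserved range before hashing it.
 */
#if 0
	struct inode *inode = new_inode(sb);
	if (inode) {
		inode->i_ino = iunique(sb, 255);	/* inos 0-255 reserved */
		insert_inode_hash(inode);
	}
#endif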
struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode_lock);
	if (!(inode->i_state & I_FREEING))
		__iget(inode);
	else
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	spin_unlock(&inode_lock);
	return inode;
}
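
/*
 * Usage sketch: code holding a bare inode pointer that may race with
 * the final iput() must take its own reference via igrab() and check
 * for NULL, e.g. when decoding an NFS file handle (-ESTALE being the
 * conventional answer for a dead inode).
 */
#if 0
	struct inode *pinned = igrab(inode);
	if (!pinned)
		return ERR_PTR(-ESTALE);
	/* ... safe to use pinned here ... */
	iput(pinned);
#endif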
struct inode *iget4(struct super_block *sb, unsigned long ino, find_inode_t find_actor, void *opaque)
{
	struct list_head * head = inode_hashtable + hash(sb, ino);
	struct inode * inode;

	spin_lock(&inode_lock);
	inode = find_inode(sb, ino, head, find_actor, opaque);
	if (inode) {
		__iget(inode);
		spin_unlock(&inode_lock);
		wait_on_inode(inode);
		return inode;
	}
	spin_unlock(&inode_lock);

	/*
	 * get_new_inode() will do the right thing, re-trying the search
	 * in case it had to block at any point.
	 */
	return get_new_inode(sb, ino, head, find_actor, opaque);
}
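
/*
 * Usage sketch: most filesystems call the iget() inline from
 * <linux/fs.h>, which is iget4() with a NULL find_actor.  An
 * ext2-style directory lookup boils down to:
 */
#if 0
	struct inode *inode = iget(dir->i_sb, ino);
	if (!inode)
		return ERR_PTR(-EACCES);
	d_add(dentry, inode);
#endif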
/**
 *	insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *
 *	Add an inode to the inode hash for this superblock. If the inode
 *	has no superblock it is added to a separate anonymous chain.
 */

void insert_inode_hash(struct inode *inode)
{
	struct list_head *head = &anon_hash_chain;
	if (inode->i_sb)
		head = inode_hashtable + hash(inode->i_sb, inode->i_ino);
	spin_lock(&inode_lock);
	list_add(&inode->i_hash, head);
	spin_unlock(&inode_lock);
}
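
/*
 * Usage sketch (my_fs_new_ino() is a hypothetical on-disk allocator):
 * a freshly created inode is given its final number and then hashed so
 * later iget() calls can find it.
 */
#if 0
	inode->i_ino = my_fs_new_ino(sb);
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
#endif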
/**
 *	remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock or anonymous hash.
 */

void remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_lock);
	list_del(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_hash);
	spin_unlock(&inode_lock);
}
/**
 *	iput - put an inode
 *	@inode: inode to put
 *
 *	Puts an inode, dropping its usage count. If the inode use count hits
 *	zero the inode is also then freed and may be destroyed.
 */

void iput(struct inode *inode)
{
	if (inode) {
		struct super_block *sb = inode->i_sb;
		struct super_operations *op = NULL;

		if (inode->i_state == I_CLEAR)
			BUG();

		if (sb && sb->s_op)
			op = sb->s_op;
		if (op && op->put_inode)
			op->put_inode(inode);

		if (!atomic_dec_and_lock(&inode->i_count, &inode_lock))
			return;

		if (!inode->i_nlink) {
			list_del(&inode->i_hash);
			INIT_LIST_HEAD(&inode->i_hash);
			list_del(&inode->i_list);
			INIT_LIST_HEAD(&inode->i_list);
			inode->i_state |= I_FREEING;
			inodes_stat.nr_inodes--;
			spin_unlock(&inode_lock);

			if (inode->i_data.nrpages)
				truncate_inode_pages(&inode->i_data, 0);

			if (op && op->delete_inode) {
				void (*delete)(struct inode *) = op->delete_inode;
				if (!is_bad_inode(inode))
					DQUOT_INIT(inode);
				/* s_op->delete_inode internally recalls clear_inode() */
				delete(inode);
			} else
				clear_inode(inode);
			if (inode->i_state != I_CLEAR)
				BUG();
		} else {
			if (!list_empty(&inode->i_hash)) {
				if (!(inode->i_state & (I_DIRTY|I_LOCK))) {
					list_del(&inode->i_list);
					list_add(&inode->i_list, &inode_unused);
				}
				inodes_stat.nr_unused++;
				spin_unlock(&inode_lock);
				if (!sb || (sb->s_flags & MS_ACTIVE))
					return;
				write_inode_now(inode, 1);
				spin_lock(&inode_lock);
				inodes_stat.nr_unused--;
				list_del_init(&inode->i_hash);
			}
			list_del_init(&inode->i_list);
			inode->i_state |= I_FREEING;
			inodes_stat.nr_inodes--;
			spin_unlock(&inode_lock);
			if (inode->i_data.nrpages)
				truncate_inode_pages(&inode->i_data, 0);
		}
		destroy_inode(inode);
	}
}
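
/*
 * Reference-count sketch: every successful iget()/igrab()/new_inode()
 * must be balanced by exactly one iput(); the final iput() on an
 * unlinked inode (i_nlink == 0) is what triggers ->delete_inode above.
 */
#if 0
	struct inode *inode = iget(sb, ino);
	if (inode) {
		/* ... work with the inode ... */
		iput(inode);
	}
#endif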
void force_delete(struct inode *inode)
{
	/*
	 * Kill off unused inodes ... iput() will unhash and
	 * delete the inode if we set i_nlink to zero.
	 */
	if (atomic_read(&inode->i_count) == 1)
		inode->i_nlink = 0;
}
/**
 *	bmap - find a block number in a file
 *	@inode: inode of file
 *	@block: block to find
 *
 *	Returns the block number on the device holding the inode that
 *	is the disk block number for the block of the file requested.
 *	That is, asked for block 4 of inode 1 the function will return the
 *	disk block relative to the disk start that holds that block of the
 *	file.
 */

int bmap(struct inode * inode, int block)
{
	int res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
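
/*
 * Usage sketch: a FIBMAP-style ioctl is essentially this mapping of a
 * logical file block to a physical device block (a result of 0 means
 * "no mapping, or the filesystem does not support bmap").
 */
#if 0
	int phys = bmap(inode, logical_block);
#endif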
/*
 * Initialize the hash tables.
 */
void __init inode_init(unsigned long mempages)
{
	struct list_head *head;
	unsigned long order;
	unsigned int nr_hash;
	int i;

	mempages >>= (14 - PAGE_SHIFT);
	mempages *= sizeof(struct list_head);
	for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
		;

	do {
		unsigned long tmp;

		nr_hash = (1UL << order) * PAGE_SIZE /
			sizeof(struct list_head);
		i_hash_mask = (nr_hash - 1);

		tmp = nr_hash;
		i_hash_shift = 0;
		while ((tmp >>= 1UL) != 0UL)
			i_hash_shift++;

		inode_hashtable = (struct list_head *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (inode_hashtable == NULL && --order >= 0);

	printk(KERN_INFO "Inode cache hash table entries: %d (order: %ld, %ld bytes)\n",
			nr_hash, order, (PAGE_SIZE << order));

	if (!inode_hashtable)
		panic("Failed to allocate inode hash table\n");

	head = inode_hashtable;
	i = nr_hash;
	do {
		INIT_LIST_HEAD(head);
		head++;
		i--;
	} while (i);

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
					 0, SLAB_HWCACHE_ALIGN, init_once,
					 NULL);
	if (!inode_cachep)
		panic("cannot create inode slab cache");

	unused_inodes_flush_task.routine = try_to_sync_unused_inodes;
}
/**
 *	update_atime - update the access time
 *	@inode: inode accessed
 *
 *	Update the accessed time on an inode and mark it for writeback.
 *	This function automatically handles read only file systems and media,
 *	as well as the "noatime" flag and inode specific "noatime" markers.
 */

void update_atime(struct inode *inode)
{
	if (inode->i_atime == CURRENT_TIME)
		return;
	if (IS_NOATIME(inode))
		return;
	if (IS_NODIRATIME(inode) && S_ISDIR(inode->i_mode))
		return;
	if (IS_RDONLY(inode))
		return;
	inode->i_atime = CURRENT_TIME;
	mark_inode_dirty_sync(inode);
}
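
/*
 * Caller-side sketch: the generic read path refreshes the access time
 * once data has been copied to user space, so a plain read() ends up
 * here unless one of the checks above vetoes the update.
 */
#if 0
	if (bytes_read > 0)
		update_atime(inode);
#endif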
/*
 *	Quota functions that want to walk the inode lists..
 */
#if defined(CONFIG_QUOTA)

/* Functions back in dquot.c */
void put_dquot_list(struct list_head *);
int remove_inode_dquot_ref(struct inode *, short, struct list_head *);
void remove_dquot_ref(struct super_block *sb, short type)
{
	struct inode *inode;
	struct list_head *act_head;
	LIST_HEAD(tofree_head);

	if (!sb->dq_op)
		return;			/* nothing to do */
	/* We have to be protected against other CPUs */
	lock_kernel();			/* This lock is for quota code */
	spin_lock(&inode_lock);		/* This lock is for inodes code */

	list_for_each(act_head, &inode_in_use) {
		inode = list_entry(act_head, struct inode, i_list);
		if (inode->i_sb == sb && IS_QUOTAINIT(inode))
			remove_inode_dquot_ref(inode, type, &tofree_head);
	}
	list_for_each(act_head, &inode_unused) {
		inode = list_entry(act_head, struct inode, i_list);
		if (inode->i_sb == sb && IS_QUOTAINIT(inode))
			remove_inode_dquot_ref(inode, type, &tofree_head);
	}
	list_for_each(act_head, &sb->s_dirty) {
		inode = list_entry(act_head, struct inode, i_list);
		if (IS_QUOTAINIT(inode))
			remove_inode_dquot_ref(inode, type, &tofree_head);
	}
	list_for_each(act_head, &sb->s_locked_inodes) {
		inode = list_entry(act_head, struct inode, i_list);
		if (IS_QUOTAINIT(inode))
			remove_inode_dquot_ref(inode, type, &tofree_head);
	}
	spin_unlock(&inode_lock);
	unlock_kernel();

	put_dquot_list(&tofree_head);
}

#endif