2 * Resizable virtual memory filesystem for Linux.
4 * Copyright (C) 2000 Linus Torvalds.
6 * 2000-2001 Christoph Rohland
9 * This file is released under the GPL.
13 * This virtual memory filesystem is heavily based on ramfs. It
14 * extends ramfs with the ability to use swap and to honor resource
15 * limits, which makes it a completely usable filesystem.
18 #include <linux/config.h>
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/devfs_fs_kernel.h>
24 #include <linux/file.h>
25 #include <linux/swap.h>
26 #include <linux/pagemap.h>
27 #include <linux/string.h>
28 #include <linux/locks.h>
29 #include <linux/smp_lock.h>
31 #include <asm/uaccess.h>
33 /* This magic number is used in glibc for posix shared memory */
34 #define TMPFS_MAGIC 0x01021994
36 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
37 #define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
39 #define SHMEM_MAX_INDEX (SHMEM_NR_DIRECT + ENTRIES_PER_PAGE * (ENTRIES_PER_PAGE/2) * (ENTRIES_PER_PAGE+1))
40 #define SHMEM_MAX_BYTES ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
41 #define VM_ACCT(size) (((size) + PAGE_CACHE_SIZE - 1) >> PAGE_SHIFT)
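/*
 * Illustrative arithmetic for the limits above, assuming
 * PAGE_CACHE_SIZE == 4096, a 32-bit unsigned long, and
 * SHMEM_NR_DIRECT == 16 (defined elsewhere):
 *
 *	ENTRIES_PER_PAGE = 4096 / 4           = 1024
 *	BLOCKS_PER_PAGE  = 4096 / 512         = 8
 *	SHMEM_MAX_INDEX  = 16 + 1024*512*1025 = 537395216 pages
 *	SHMEM_MAX_BYTES  = 537395216 << 12   ~= 2.2 * 10^12 bytes (~2 TB)
 *
 * VM_ACCT(size) rounds a byte count up to whole pages, so
 * VM_ACCT(1) == 1 and VM_ACCT(2*PAGE_CACHE_SIZE) == 2.
 */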
43 /* Pretend that each entry is of this size in directory's i_size */
44 #define BOGO_DIRENT_SIZE 20
46 #define SHMEM_SB(sb) (&sb->u.shmem_sb)
48 static struct super_operations shmem_ops;
49 static struct address_space_operations shmem_aops;
50 static struct file_operations shmem_file_operations;
51 static struct inode_operations shmem_inode_operations;
52 static struct inode_operations shmem_dir_inode_operations;
53 static struct vm_operations_struct shmem_vm_ops;
55 LIST_HEAD (shmem_inodes);
56 static spinlock_t shmem_ilock = SPIN_LOCK_UNLOCKED;
57 atomic_t shmem_nrpages = ATOMIC_INIT(0); /* Not used right now */
59 static struct page *shmem_getpage_locked(struct shmem_inode_info *, struct inode *, unsigned long);
62 * shmem_recalc_inode - recalculate the size of an inode
64 * @inode: inode to recalc
65 * @swap: additional swap pages freed externally
67 * We have to calculate the free blocks since the mm can drop pages behind our back.
70 * But we know that normally
71 * inode->i_blocks/BLOCKS_PER_PAGE ==
72 * inode->i_mapping->nrpages + info->swapped
 * so the number of pages freed behind our back is
75 * inode->i_blocks/BLOCKS_PER_PAGE -
76 * (inode->i_mapping->nrpages + info->swapped)
78 * It has to be called with the spinlock held.
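 *
 * A worked example with assumed numbers: with BLOCKS_PER_PAGE == 8, an
 * inode whose i_blocks is 40 accounts for 5 pages; if nrpages is 3 and
 * info->swapped is 1, then 5 - (3 + 1) == 1 page was reclaimed behind
 * our back, so i_blocks is reduced by 8 and the superblock's
 * free_blocks count is increased by 1.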
81 static void shmem_recalc_inode(struct inode * inode)
85 freed = (inode->i_blocks/BLOCKS_PER_PAGE) -
86 (inode->i_mapping->nrpages + SHMEM_I(inode)->swapped);
88 struct shmem_sb_info * sbinfo = SHMEM_SB(inode->i_sb);
89 inode->i_blocks -= freed*BLOCKS_PER_PAGE;
90 spin_lock (&sbinfo->stat_lock);
91 sbinfo->free_blocks += freed;
92 spin_unlock (&sbinfo->stat_lock);
97 * shmem_swp_entry - find the swap vector position in the info structure
99 * @info: info structure for the inode
100 * @index: index of the page to find
101 * @page: optional page to add to the structure; has to be preset
 *        to all zeros
104 * If there is no space allocated yet it will return -ENOMEM when
105 * page == 0, else it will use the page for the needed block.
107 * returns -EFBIG if the index is too big.
110 * The swap vector is organized the following way:
112 * There are SHMEM_NR_DIRECT entries directly stored in the
113 * shmem_inode_info structure, so small files do not need an additional allocation.
116 * For pages with index > SHMEM_NR_DIRECT there is the pointer
117 * i_indirect which points to a page which holds in the first half
118 * doubly indirect blocks, in the second half triple indirect blocks:
120 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
121 * following layout (for SHMEM_NR_DIRECT == 16):
123 * i_indirect -> dir --> 16-19
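 *
 * (Illustrative summary of the rest of the layout, assuming the same
 * ENTRIES_PER_PAGE == 4 and SHMEM_NR_DIRECT == 16: the first half of
 * i_indirect, entries 0-1, points straight at pages of swap entries
 * covering indices 16-23; the second half, entries 2-3, points at
 * dir2 and dir3, each itself a page of pointers to entry pages,
 * covering indices 24-39 and 40-55 respectively.)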
136 static swp_entry_t * shmem_swp_entry (struct shmem_inode_info *info, unsigned long index, unsigned long page)
138 unsigned long offset;
141 if (index < SHMEM_NR_DIRECT)
142 return info->i_direct+index;
144 index -= SHMEM_NR_DIRECT;
145 offset = index % ENTRIES_PER_PAGE;
146 index /= ENTRIES_PER_PAGE;
148 if (!info->i_indirect) {
149 info->i_indirect = (void *) page;
150 return ERR_PTR(-ENOMEM);
153 dir = info->i_indirect + index;
154 if (index >= ENTRIES_PER_PAGE/2) {
155 index -= ENTRIES_PER_PAGE/2;
156 dir = info->i_indirect + ENTRIES_PER_PAGE/2
157 + index/ENTRIES_PER_PAGE;
158 index %= ENTRIES_PER_PAGE;
161 *dir = (void *) page;
162 /* We return since we will need another page
164 return ERR_PTR(-ENOMEM);
166 dir = ((void **)*dir) + index;
170 return ERR_PTR(-ENOMEM);
173 return ((swp_entry_t *)*dir) + offset;
177 * shmem_alloc_entry - get the position of the swap entry for the
178 * page; if it does not exist yet, allocate it
180 * @info: info structure for the inode
181 * @index: index of the page to find
183 static inline swp_entry_t * shmem_alloc_entry (struct shmem_inode_info *info, unsigned long index)
185 unsigned long page = 0;
188 if (index >= SHMEM_MAX_INDEX)
189 return ERR_PTR(-EFBIG);
191 if (info->next_index <= index)
192 info->next_index = index + 1;
194 while ((res = shmem_swp_entry(info,index,page)) == ERR_PTR(-ENOMEM)) {
195 page = get_zeroed_page(GFP_USER);
203 * shmem_free_swp - free some swap entries in a directory
205 * @dir: pointer to the directory
206 * @count: number of entries to scan
208 static int shmem_free_swp(swp_entry_t *dir, unsigned int count)
210 swp_entry_t *ptr, entry;
213 for (ptr = dir; ptr < dir + count; ptr++) {
217 *ptr = (swp_entry_t){0};
219 free_swap_and_cache(entry);
225 * shmem_truncate_direct - free the swap entries of a whole doubly indirect block
228 * @dir: pointer to the pointer to the block
229 * @start: offset to start from (in pages)
230 * @len: how many pages are stored in this block
232 * Returns the number of freed swap entries.
235 static inline unsigned long
236 shmem_truncate_direct(swp_entry_t *** dir, unsigned long start, unsigned long len) {
237 swp_entry_t **last, **ptr;
238 unsigned long off, freed = 0;
243 last = *dir + (len + ENTRIES_PER_PAGE-1) / ENTRIES_PER_PAGE;
244 off = start % ENTRIES_PER_PAGE;
246 for (ptr = *dir + start/ENTRIES_PER_PAGE; ptr < last; ptr++) {
253 freed += shmem_free_swp(*ptr, ENTRIES_PER_PAGE);
254 free_page ((unsigned long) *ptr);
257 freed += shmem_free_swp(*ptr+off,ENTRIES_PER_PAGE-off);
263 free_page((unsigned long) *dir);
270 * shmem_truncate_indirect - truncate an inode
272 * @info: the info structure of the inode
273 * @index: the index to truncate
275 * This function locates the last doubly indirect block and then
276 * calls shmem_truncate_direct to do the real work.
278 static inline unsigned long
279 shmem_truncate_indirect(struct shmem_inode_info *info, unsigned long index)
282 unsigned long baseidx, len, start;
283 unsigned long max = info->next_index-1;
285 if (max < SHMEM_NR_DIRECT) {
286 info->next_index = index;
287 return shmem_free_swp(info->i_direct + index,
288 SHMEM_NR_DIRECT - index);
291 if (max < ENTRIES_PER_PAGE * ENTRIES_PER_PAGE/2 + SHMEM_NR_DIRECT) {
292 max -= SHMEM_NR_DIRECT;
293 base = (swp_entry_t ***) &info->i_indirect;
294 baseidx = SHMEM_NR_DIRECT;
297 max -= ENTRIES_PER_PAGE*ENTRIES_PER_PAGE/2+SHMEM_NR_DIRECT;
298 if (max >= ENTRIES_PER_PAGE*ENTRIES_PER_PAGE*ENTRIES_PER_PAGE/2)
301 baseidx = max & ~(ENTRIES_PER_PAGE*ENTRIES_PER_PAGE-1);
302 base = (swp_entry_t ***) info->i_indirect + ENTRIES_PER_PAGE/2 + baseidx/ENTRIES_PER_PAGE/ENTRIES_PER_PAGE ;
303 len = max - baseidx + 1;
304 baseidx += ENTRIES_PER_PAGE*ENTRIES_PER_PAGE/2+SHMEM_NR_DIRECT;
307 if (index > baseidx) {
308 info->next_index = index;
309 start = index - baseidx;
311 info->next_index = baseidx;
314 return shmem_truncate_direct(base, start, len);
317 static void shmem_truncate (struct inode * inode)
320 unsigned long partial;
321 unsigned long freed = 0;
322 struct shmem_inode_info * info = SHMEM_I(inode);
325 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
326 spin_lock (&info->lock);
327 index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
328 partial = inode->i_size & ~PAGE_CACHE_MASK;
331 swp_entry_t *entry = shmem_swp_entry(info, index-1, 0);
334 * This check is racy: it's faintly possible that page
335 * was assigned to swap during truncate_inode_pages,
336 * and now assigned to file; but better than nothing.
338 if (!IS_ERR(entry) && entry->val) {
339 spin_unlock(&info->lock);
340 page = shmem_getpage_locked(info, inode, index-1);
342 memclear_highpage_flush(page, partial,
343 PAGE_CACHE_SIZE - partial);
345 page_cache_release(page);
347 spin_lock(&info->lock);
351 while (index < info->next_index)
352 freed += shmem_truncate_indirect(info, index);
354 info->swapped -= freed;
355 shmem_recalc_inode(inode);
356 spin_unlock (&info->lock);
360 static void shmem_delete_inode(struct inode * inode)
362 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
364 if (inode->i_op->truncate == shmem_truncate) {
365 spin_lock (&shmem_ilock);
366 list_del (&SHMEM_I(inode)->list);
367 spin_unlock (&shmem_ilock);
369 shmem_truncate (inode);
371 spin_lock (&sbinfo->stat_lock);
372 sbinfo->free_inodes++;
373 spin_unlock (&sbinfo->stat_lock);
377 static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *ptr, swp_entry_t *eptr)
381 for (test = ptr; test < eptr; test++) {
382 if (test->val == entry.val)
388 static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
395 ptr = info->i_direct;
396 spin_lock (&info->lock);
397 offset = info->next_index;
398 if (offset > SHMEM_NR_DIRECT)
399 offset = SHMEM_NR_DIRECT;
400 offset = shmem_find_swp(entry, ptr, ptr + offset);
404 for (idx = SHMEM_NR_DIRECT; idx < info->next_index;
405 idx += ENTRIES_PER_PAGE) {
406 ptr = shmem_swp_entry(info, idx, 0);
409 offset = info->next_index - idx;
410 if (offset > ENTRIES_PER_PAGE)
411 offset = ENTRIES_PER_PAGE;
412 offset = shmem_find_swp(entry, ptr, ptr + offset);
416 spin_unlock (&info->lock);
420 ptr[offset] = (swp_entry_t) {0};
421 delete_from_swap_cache(page);
422 add_to_page_cache(page, info->inode->i_mapping, offset + idx);
424 SetPageUptodate(page);
426 spin_unlock(&info->lock);
431 * shmem_unuse() searches for a possibly swapped-out shmem page.
433 void shmem_unuse(swp_entry_t entry, struct page *page)
436 struct shmem_inode_info * info;
438 spin_lock (&shmem_ilock);
439 list_for_each(p, &shmem_inodes) {
440 info = list_entry(p, struct shmem_inode_info, list);
442 if (info->swapped && shmem_unuse_inode(info, entry, page)) {
443 /* move the list head so the next search starts from here */
444 list_del(&shmem_inodes);
445 list_add_tail(&shmem_inodes, p);
449 spin_unlock (&shmem_ilock);
453 * Move the page from the page cache to the swap cache.
455 * The page lock prevents multiple occurrences of shmem_writepage at
456 * once. We still need to guard against racing with
457 * shmem_getpage_locked().
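 *
 * In outline (a summary of the code below): allocate a swap entry,
 * take info->lock, look up the swap vector slot for this index, remove
 * the page from the page cache, add it to the swap cache and mark it
 * dirty so it gets written out; if add_to_swap_cache() fails, we raced
 * with a speculative swapin and put the page back into the page cache.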
459 static int shmem_writepage(struct page * page)
461 struct shmem_inode_info *info;
462 swp_entry_t *entry, swap;
463 struct address_space *mapping;
467 if (!PageLocked(page))
469 if (!PageLaunder(page))
470 return fail_writepage(page);
472 mapping = page->mapping;
474 inode = mapping->host;
475 info = SHMEM_I(inode);
477 return fail_writepage(page);
479 swap = get_swap_page();
481 return fail_writepage(page);
483 spin_lock(&info->lock);
484 entry = shmem_swp_entry(info, index, 0);
485 if (IS_ERR(entry)) /* the entry should already have been allocated when the page was */
487 shmem_recalc_inode(inode);
491 /* Remove it from the page cache */
492 remove_inode_page(page);
493 page_cache_release(page);
495 /* Add it to the swap cache */
496 if (add_to_swap_cache(page, swap) != 0) {
498 * Raced with "speculative" read_swap_cache_async.
499 * Add page back to page cache, unref swap, try again.
501 add_to_page_cache_locked(page, mapping, index);
502 spin_unlock(&info->lock);
509 spin_unlock(&info->lock);
510 SetPageUptodate(page);
511 set_page_dirty(page);
517 * shmem_getpage_locked - either get the page from swap or allocate a new one
519 * If we allocate a new one we do not mark it dirty. That's up to the
520 * vm. If we swap it in we mark it dirty and also free the swap
521 * entry, since a page cannot live in both the swap cache and the page cache.
523 * Called with the inode locked, so it cannot race with itself, but we
524 * still need to guard against racing with shmem_writepage(), which might
525 * be trying to move the page to the swap cache as we run.
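 *
 * (Orientation note summarizing the cases handled below: first look for
 * the page in the page cache; failing that, if the swap vector holds an
 * entry for this index, read the page back from swap, delete it from
 * the swap cache and re-add it to the page cache; otherwise charge one
 * block to the superblock and allocate a fresh zeroed page.)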
527 static struct page * shmem_getpage_locked(struct shmem_inode_info *info, struct inode * inode, unsigned long idx)
529 struct address_space * mapping = inode->i_mapping;
530 struct shmem_sb_info *sbinfo;
535 page = find_lock_page(mapping, idx);
539 entry = shmem_alloc_entry (info, idx);
541 return (void *)entry;
543 spin_lock (&info->lock);
545 /* The shmem_alloc_entry() call may have blocked, and
546 * shmem_writepage may have been moving a page between the page
547 * cache and swap cache. We need to recheck the page cache
548 * under the protection of the info->lock spinlock. */
550 page = find_get_page(mapping, idx);
552 if (TryLockPage(page))
554 spin_unlock (&info->lock);
558 shmem_recalc_inode(inode);
562 /* Look it up and read it in.. */
563 page = lookup_swap_cache(*entry);
565 swp_entry_t swap = *entry;
566 spin_unlock (&info->lock);
567 swapin_readahead(*entry);
568 page = read_swap_cache_async(*entry);
570 if (entry->val != swap.val)
572 return ERR_PTR(-ENOMEM);
575 if (!Page_Uptodate(page) && entry->val == swap.val) {
576 page_cache_release(page);
577 return ERR_PTR(-EIO);
580 /* Too bad we can't trust this page, because we
581 * dropped the info->lock spinlock */
582 page_cache_release(page);
586 /* We have to do this with the page locked to prevent races */
587 if (TryLockPage(page))
591 *entry = (swp_entry_t) {0};
592 delete_from_swap_cache(page);
593 flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_referenced) | (1 << PG_arch_1));
594 page->flags = flags | (1 << PG_dirty);
595 add_to_page_cache_locked(page, mapping, idx);
597 spin_unlock (&info->lock);
599 sbinfo = SHMEM_SB(inode->i_sb);
600 spin_unlock (&info->lock);
601 spin_lock (&sbinfo->stat_lock);
602 if (sbinfo->free_blocks == 0)
604 sbinfo->free_blocks--;
605 spin_unlock (&sbinfo->stat_lock);
607 /* Ok, get a new page. We don't have to worry about the
608 * info->lock spinlock here: we cannot race against
609 * shmem_writepage because we have already verified that
610 * there is no page present either in memory or in the
611 * swap cache, so we are guaranteed to be populating a
612 * new shm entry. The inode semaphore we already hold
613 * is enough to make this atomic. */
614 page = page_cache_alloc(mapping);
616 return ERR_PTR(-ENOMEM);
617 clear_highpage(page);
618 flush_dcache_page(page);
619 inode->i_blocks += BLOCKS_PER_PAGE;
620 add_to_page_cache (page, mapping, idx);
623 /* We have the page */
624 SetPageUptodate(page);
627 spin_unlock (&sbinfo->stat_lock);
628 return ERR_PTR(-ENOSPC);
631 spin_unlock (&info->lock);
633 page_cache_release(page);
637 static int shmem_getpage(struct inode * inode, unsigned long idx, struct page **ptr)
639 struct shmem_inode_info *info = SHMEM_I(inode);
643 *ptr = ERR_PTR(-EFAULT);
644 if (inode->i_size <= (loff_t) idx * PAGE_CACHE_SIZE)
647 *ptr = shmem_getpage_locked(info, inode, idx);
656 error = PTR_ERR(*ptr);
657 *ptr = NOPAGE_SIGBUS;
658 if (error == -ENOMEM)
663 struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused)
667 struct inode * inode = vma->vm_file->f_dentry->d_inode;
669 idx = (address - vma->vm_start) >> PAGE_CACHE_SHIFT;
670 idx += vma->vm_pgoff;
672 if (shmem_getpage(inode, idx, &page))
675 flush_page_to_ram(page);
679 void shmem_lock(struct file * file, int lock)
681 struct inode * inode = file->f_dentry->d_inode;
682 struct shmem_inode_info * info = SHMEM_I(inode);
689 static int shmem_mmap(struct file * file, struct vm_area_struct * vma)
691 struct vm_operations_struct * ops;
692 struct inode *inode = file->f_dentry->d_inode;
695 if (!inode->i_sb || !S_ISREG(inode->i_mode))
702 struct inode *shmem_get_inode(struct super_block *sb, int mode, int dev)
704 struct inode * inode;
705 struct shmem_inode_info *info;
706 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
708 spin_lock (&sbinfo->stat_lock);
709 if (!sbinfo->free_inodes) {
710 spin_unlock (&sbinfo->stat_lock);
713 sbinfo->free_inodes--;
714 spin_unlock (&sbinfo->stat_lock);
716 inode = new_inode(sb);
718 inode->i_mode = mode;
719 inode->i_uid = current->fsuid;
720 inode->i_gid = current->fsgid;
721 inode->i_blksize = PAGE_CACHE_SIZE;
723 inode->i_rdev = NODEV;
724 inode->i_mapping->a_ops = &shmem_aops;
725 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
726 info = SHMEM_I(inode);
728 spin_lock_init (&info->lock);
729 sema_init (&info->sem, 1);
730 switch (mode & S_IFMT) {
732 init_special_inode(inode, mode, dev);
735 inode->i_op = &shmem_inode_operations;
736 inode->i_fop = &shmem_file_operations;
737 spin_lock (&shmem_ilock);
738 list_add_tail(&info->list, &shmem_inodes);
739 spin_unlock (&shmem_ilock);
743 /* Some things misbehave if size == 0 on a directory */
744 inode->i_size = 2 * BOGO_DIRENT_SIZE;
745 inode->i_op = &shmem_dir_inode_operations;
746 inode->i_fop = &dcache_dir_ops;
755 static int shmem_set_size(struct shmem_sb_info *info,
756 unsigned long max_blocks, unsigned long max_inodes)
759 unsigned long blocks, inodes;
761 spin_lock(&info->stat_lock);
762 blocks = info->max_blocks - info->free_blocks;
763 inodes = info->max_inodes - info->free_inodes;
765 if (max_blocks < blocks)
767 if (max_inodes < inodes)
770 info->max_blocks = max_blocks;
771 info->free_blocks = max_blocks - blocks;
772 info->max_inodes = max_inodes;
773 info->free_inodes = max_inodes - inodes;
775 spin_unlock(&info->stat_lock);
781 static struct inode_operations shmem_symlink_inode_operations;
782 static struct inode_operations shmem_symlink_inline_operations;
785 shmem_file_write(struct file *file,const char *buf,size_t count,loff_t *ppos)
787 struct inode *inode = file->f_dentry->d_inode;
788 struct shmem_inode_info *info;
789 unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
792 unsigned long written;
796 if ((ssize_t) count < 0)
799 if (!access_ok(VERIFY_READ, buf, count))
817 if (file->f_flags & O_APPEND)
821 * Check whether we've reached the file size limit.
824 if (limit != RLIM_INFINITY) {
826 send_sig(SIGXFSZ, current, 0);
829 if (count > limit - pos) {
830 send_sig(SIGXFSZ, current, 0);
838 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
842 unsigned long bytes, index, offset;
846 * Try to find the page in the cache. If it isn't there,
847 * allocate a free page.
849 offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
850 index = pos >> PAGE_CACHE_SHIFT;
851 bytes = PAGE_CACHE_SIZE - offset;
857 * Bring in the user page that we will copy from _first_.
858 * Otherwise there's a nasty deadlock on copying from the
859 * same page as we're writing to, without it being marked up-to-date.
862 { volatile unsigned char dummy;
863 __get_user(dummy, buf);
864 __get_user(dummy, buf+bytes-1);
867 info = SHMEM_I(inode);
869 page = shmem_getpage_locked(info, inode, index);
872 status = PTR_ERR(page);
876 /* We have exclusive IO access to the page.. */
877 if (!PageLocked(page)) {
882 status = copy_from_user(kaddr+offset, buf, bytes);
887 flush_dcache_page(page);
894 if (pos > inode->i_size)
898 /* Mark it unlocked again and drop the page.. */
900 page_cache_release(page);
907 err = written ? written : status;
913 ClearPageUptodate(page);
917 static void do_shmem_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc)
919 struct inode *inode = filp->f_dentry->d_inode;
920 struct address_space *mapping = inode->i_mapping;
921 unsigned long index, offset;
923 index = *ppos >> PAGE_CACHE_SHIFT;
924 offset = *ppos & ~PAGE_CACHE_MASK;
928 unsigned long end_index, nr, ret;
930 end_index = inode->i_size >> PAGE_CACHE_SHIFT;
931 if (index > end_index)
933 nr = PAGE_CACHE_SIZE;
934 if (index == end_index) {
935 nr = inode->i_size & ~PAGE_CACHE_MASK;
942 if ((desc->error = shmem_getpage(inode, index, &page)))
945 if (mapping->i_mmap_shared != NULL)
946 flush_dcache_page(page);
949 * Ok, we have the page, and it's up-to-date, so
950 * now we can copy it to user space...
952 * The actor routine returns how many bytes were actually used..
953 * NOTE! This may not be the same as how much of a user buffer
954 * we filled up (we may be padding etc), so we can only update
955 * "pos" here (the actor routine has to update the user buffer
956 * pointers and the remaining count).
958 ret = file_read_actor(desc, page, offset, nr);
960 index += offset >> PAGE_CACHE_SHIFT;
961 offset &= ~PAGE_CACHE_MASK;
963 page_cache_release(page);
964 if (ret != nr || !desc->count)
968 *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
972 static ssize_t shmem_file_read(struct file * filp, char * buf, size_t count, loff_t *ppos)
977 if (access_ok(VERIFY_WRITE, buf, count)) {
981 read_descriptor_t desc;
987 do_shmem_file_read(filp, ppos, &desc);
989 retval = desc.written;
997 static int shmem_statfs(struct super_block *sb, struct statfs *buf)
999 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1001 buf->f_type = TMPFS_MAGIC;
1002 buf->f_bsize = PAGE_CACHE_SIZE;
1003 spin_lock (&sbinfo->stat_lock);
1004 buf->f_blocks = sbinfo->max_blocks;
1005 buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1006 buf->f_files = sbinfo->max_inodes;
1007 buf->f_ffree = sbinfo->free_inodes;
1008 spin_unlock (&sbinfo->stat_lock);
1009 buf->f_namelen = NAME_MAX;
1014 * Lookup the data. This is trivial - if the dentry didn't already
1015 * exist, we know it is negative.
1017 static struct dentry * shmem_lookup(struct inode *dir, struct dentry *dentry)
1019 d_add(dentry, NULL);
1024 * File creation. Allocate an inode, and we're done..
1026 static int shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, int dev)
1028 struct inode * inode = shmem_get_inode(dir->i_sb, mode, dev);
1029 int error = -ENOSPC;
1032 dir->i_size += BOGO_DIRENT_SIZE;
1033 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1034 d_instantiate(dentry, inode);
1035 dget(dentry); /* Extra count - pin the dentry in core */
1041 static int shmem_mkdir(struct inode * dir, struct dentry * dentry, int mode)
1045 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1051 static int shmem_create(struct inode *dir, struct dentry *dentry, int mode)
1053 return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1059 static int shmem_link(struct dentry *old_dentry, struct inode * dir, struct dentry * dentry)
1061 struct inode *inode = old_dentry->d_inode;
1063 if (S_ISDIR(inode->i_mode))
1066 dir->i_size += BOGO_DIRENT_SIZE;
1067 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1069 atomic_inc(&inode->i_count); /* New dentry reference */
1070 dget(dentry); /* Extra pinning count for the created dentry */
1071 d_instantiate(dentry, inode);
1075 static inline int shmem_positive(struct dentry *dentry)
1077 return dentry->d_inode && !d_unhashed(dentry);
1081 * Check that a directory is empty (this works
1082 * for regular files too, they'll just always be
1083 * considered empty..).
1085 * Note that an empty directory can still have
1086 * children, they just all have to be negative..
1088 static int shmem_empty(struct dentry *dentry)
1090 struct list_head *list;
1092 spin_lock(&dcache_lock);
1093 list = dentry->d_subdirs.next;
1095 while (list != &dentry->d_subdirs) {
1096 struct dentry *de = list_entry(list, struct dentry, d_child);
1098 if (shmem_positive(de)) {
1099 spin_unlock(&dcache_lock);
1104 spin_unlock(&dcache_lock);
1108 static int shmem_unlink(struct inode * dir, struct dentry *dentry)
1110 struct inode *inode = dentry->d_inode;
1112 dir->i_size -= BOGO_DIRENT_SIZE;
1113 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1115 dput(dentry); /* Undo the count from "create" - this does all the work */
1119 static int shmem_rmdir(struct inode * dir, struct dentry *dentry)
1121 if (!shmem_empty(dentry))
1125 return shmem_unlink(dir, dentry);
1129 * The VFS layer already does all the dentry stuff for rename,
1130 * we just have to decrement the usage count for the target if
1131 * it exists, so that the VFS layer correctly frees it when it gets the chance.
1134 static int shmem_rename(struct inode * old_dir, struct dentry *old_dentry, struct inode * new_dir,struct dentry *new_dentry)
1136 struct inode *inode = old_dentry->d_inode;
1137 int they_are_dirs = S_ISDIR(inode->i_mode);
1139 if (!shmem_empty(new_dentry))
1142 if (new_dentry->d_inode) {
1143 (void) shmem_unlink(new_dir, new_dentry);
1146 } else if (they_are_dirs) {
1151 old_dir->i_size -= BOGO_DIRENT_SIZE;
1152 new_dir->i_size += BOGO_DIRENT_SIZE;
1153 old_dir->i_ctime = old_dir->i_mtime =
1154 new_dir->i_ctime = new_dir->i_mtime =
1155 inode->i_ctime = CURRENT_TIME;
1159 static int shmem_symlink(struct inode * dir, struct dentry *dentry, const char * symname)
1162 struct inode *inode;
1165 struct shmem_inode_info * info;
1167 len = strlen(symname) + 1;
1168 if (len > PAGE_CACHE_SIZE)
1169 return -ENAMETOOLONG;
1171 inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
1175 info = SHMEM_I(inode);
1176 inode->i_size = len-1;
1177 if (len <= sizeof(struct shmem_inode_info)) {
1179 memcpy(info, symname, len);
1180 inode->i_op = &shmem_symlink_inline_operations;
1183 page = shmem_getpage_locked(info, inode, 0);
1187 return PTR_ERR(page);
1189 inode->i_op = &shmem_symlink_inode_operations;
1190 spin_lock (&shmem_ilock);
1191 list_add_tail(&info->list, &shmem_inodes);
1192 spin_unlock (&shmem_ilock);
1194 memcpy(kaddr, symname, len);
1198 page_cache_release(page);
1201 dir->i_size += BOGO_DIRENT_SIZE;
1202 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1203 d_instantiate(dentry, inode);
1208 static int shmem_readlink_inline(struct dentry *dentry, char *buffer, int buflen)
1210 return vfs_readlink(dentry,buffer,buflen, (const char *)SHMEM_I(dentry->d_inode));
1213 static int shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1215 return vfs_follow_link(nd, (const char *)SHMEM_I(dentry->d_inode));
1218 static int shmem_readlink(struct dentry *dentry, char *buffer, int buflen)
1221 int res = shmem_getpage(dentry->d_inode, 0, &page);
1226 res = vfs_readlink(dentry,buffer,buflen, kmap(page));
1228 page_cache_release(page);
1232 static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1235 int res = shmem_getpage(dentry->d_inode, 0, &page);
1239 res = vfs_follow_link(nd, kmap(page));
1241 page_cache_release(page);
1245 static struct inode_operations shmem_symlink_inline_operations = {
1246 readlink: shmem_readlink_inline,
1247 follow_link: shmem_follow_link_inline,
1250 static struct inode_operations shmem_symlink_inode_operations = {
1251 truncate: shmem_truncate,
1252 readlink: shmem_readlink,
1253 follow_link: shmem_follow_link,
1256 static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long * blocks, unsigned long *inodes)
1258 char *this_char, *value, *rest;
1262 this_char = strtok(options,",");
1263 for ( ; this_char; this_char = strtok(NULL,",")) {
1264 if ((value = strchr(this_char,'=')) != NULL) {
1268 "tmpfs: No value for mount option '%s'\n",
1273 if (!strcmp(this_char,"size")) {
1274 unsigned long long size;
1275 size = memparse(value,&rest);
1278 *blocks = size >> PAGE_CACHE_SHIFT;
1279 } else if (!strcmp(this_char,"nr_blocks")) {
1280 *blocks = memparse(value,&rest);
1283 } else if (!strcmp(this_char,"nr_inodes")) {
1284 *inodes = memparse(value,&rest);
1287 } else if (!strcmp(this_char,"mode")) {
1290 *mode = simple_strtoul(value,&rest,8);
1293 } else if (!strcmp(this_char,"uid")) {
1296 *uid = simple_strtoul(value,&rest,0);
1299 } else if (!strcmp(this_char,"gid")) {
1302 *gid = simple_strtoul(value,&rest,0);
1306 printk(KERN_ERR "tmpfs: Bad mount option %s\n",
1314 printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
1320 static int shmem_remount_fs (struct super_block *sb, int *flags, char *data)
1322 struct shmem_sb_info *sbinfo = &sb->u.shmem_sb;
1323 unsigned long max_blocks = sbinfo->max_blocks;
1324 unsigned long max_inodes = sbinfo->max_inodes;
1326 if (shmem_parse_options (data, NULL, NULL, NULL, &max_blocks, &max_inodes))
1328 return shmem_set_size(sbinfo, max_blocks, max_inodes);
1331 int shmem_sync_file(struct file * file, struct dentry *dentry, int datasync)
1337 static struct super_block *shmem_read_super(struct super_block * sb, void * data, int silent)
1339 struct inode * inode;
1340 struct dentry * root;
1341 unsigned long blocks, inodes;
1342 int mode = S_IRWXUGO | S_ISVTX;
1343 uid_t uid = current->fsuid;
1344 gid_t gid = current->fsgid;
1345 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1349 * By default we only allow half of the physical RAM per tmpfs instance.
1353 blocks = inodes = si.totalram / 2;
1356 if (shmem_parse_options (data, &mode, &uid, &gid, &blocks, &inodes))
1360 spin_lock_init (&sbinfo->stat_lock);
1361 sbinfo->max_blocks = blocks;
1362 sbinfo->free_blocks = blocks;
1363 sbinfo->max_inodes = inodes;
1364 sbinfo->free_inodes = inodes;
1365 sb->s_maxbytes = SHMEM_MAX_BYTES;
1366 sb->s_blocksize = PAGE_CACHE_SIZE;
1367 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
1368 sb->s_magic = TMPFS_MAGIC;
1369 sb->s_op = &shmem_ops;
1370 inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
1376 root = d_alloc_root(inode);
1387 static struct address_space_operations shmem_aops = {
1388 writepage: shmem_writepage,
1391 static struct file_operations shmem_file_operations = {
1394 read: shmem_file_read,
1395 write: shmem_file_write,
1396 fsync: shmem_sync_file,
1400 static struct inode_operations shmem_inode_operations = {
1401 truncate: shmem_truncate,
1404 static struct inode_operations shmem_dir_inode_operations = {
1406 create: shmem_create,
1407 lookup: shmem_lookup,
1409 unlink: shmem_unlink,
1410 symlink: shmem_symlink,
1414 rename: shmem_rename,
1418 static struct super_operations shmem_ops = {
1420 statfs: shmem_statfs,
1421 remount_fs: shmem_remount_fs,
1423 delete_inode: shmem_delete_inode,
1424 put_inode: force_delete,
1427 static struct vm_operations_struct shmem_vm_ops = {
1428 nopage: shmem_nopage,
1432 /* type "shm" will be tagged obsolete in 2.5 */
1433 static DECLARE_FSTYPE(shmem_fs_type, "shm", shmem_read_super, FS_LITTER);
1434 static DECLARE_FSTYPE(tmpfs_fs_type, "tmpfs", shmem_read_super, FS_LITTER);
1436 static DECLARE_FSTYPE(tmpfs_fs_type, "tmpfs", shmem_read_super, FS_LITTER|FS_NOMOUNT);
1438 static struct vfsmount *shm_mnt;
1440 static int __init init_shmem_fs(void)
1443 struct vfsmount * res;
1445 if ((error = register_filesystem(&tmpfs_fs_type))) {
1446 printk (KERN_ERR "Could not register tmpfs\n");
1450 if ((error = register_filesystem(&shmem_fs_type))) {
1451 printk (KERN_ERR "Could not register shm fs\n");
1454 devfs_mk_dir (NULL, "shm", NULL);
1456 res = kern_mount(&tmpfs_fs_type);
1458 printk (KERN_ERR "could not kern_mount tmpfs\n");
1459 unregister_filesystem(&tmpfs_fs_type);
1460 return PTR_ERR(res);
1464 /* The internal instance should not do size checking */
1465 if ((error = shmem_set_size(SHMEM_SB(res->mnt_sb), ULONG_MAX, ULONG_MAX)))
1466 printk (KERN_ERR "could not set limits on internal tmpfs\n");
1471 static void __exit exit_shmem_fs(void)
1474 unregister_filesystem(&shmem_fs_type);
1476 unregister_filesystem(&tmpfs_fs_type);
1480 module_init(init_shmem_fs)
1481 module_exit(exit_shmem_fs)
1484 * shmem_file_setup - get an unlinked file living in shmem fs
1486 * @name: name for dentry (to be seen in /proc/<pid>/maps)
1487 * @size: size to be set for the file
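 *
 * A minimal usage sketch (hypothetical caller, mirroring what
 * shmem_zero_setup() below does):
 *
 *	struct file *file = shmem_file_setup("dev/zero", size);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	if (vma->vm_file)
 *		fput(vma->vm_file);
 *	vma->vm_file = file;
 *	vma->vm_ops = &shmem_vm_ops;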
1490 struct file *shmem_file_setup(char * name, loff_t size)
1494 struct inode * inode;
1495 struct dentry *dentry, *root;
1497 int vm_enough_memory(long pages);
1499 if (size > SHMEM_MAX_BYTES)
1500 return ERR_PTR(-EINVAL);
1502 if (!vm_enough_memory(VM_ACCT(size)))
1503 return ERR_PTR(-ENOMEM);
1506 this.len = strlen(name);
1507 this.hash = 0; /* will go */
1508 root = shm_mnt->mnt_root;
1509 dentry = d_alloc(root, &this);
1511 return ERR_PTR(-ENOMEM);
1514 file = get_empty_filp();
1519 inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
1523 d_instantiate(dentry, inode);
1524 inode->i_size = size;
1525 inode->i_nlink = 0; /* It is unlinked */
1526 file->f_vfsmnt = mntget(shm_mnt);
1527 file->f_dentry = dentry;
1528 file->f_op = &shmem_file_operations;
1529 file->f_mode = FMODE_WRITE | FMODE_READ;
1536 return ERR_PTR(error);
1540 * shmem_zero_setup - setup a shared anonymous mapping
1542 * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
1544 int shmem_zero_setup(struct vm_area_struct *vma)
1547 loff_t size = vma->vm_end - vma->vm_start;
1549 file = shmem_file_setup("dev/zero", size);
1551 return PTR_ERR(file);
1554 fput (vma->vm_file);
1555 vma->vm_file = file;
1556 vma->vm_ops = &shmem_vm_ops;
1560 EXPORT_SYMBOL(shmem_file_setup);