/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h>

#include "util.h"
struct shmid_kernel /* private to the kernel */
{
	struct kern_ipc_perm	shm_perm;
	struct file *		shm_file;
	int			id;
	unsigned long		shm_nattch;
	unsigned long		shm_segsz;
	time_t			shm_atim;
	time_t			shm_dtim;
	time_t			shm_ctim;
	pid_t			shm_cprid;
	pid_t			shm_lprid;
};

#define shm_flags	shm_perm.mode
static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids shm_ids;

#define shm_lock(id)	((struct shmid_kernel*)ipc_lock(&shm_ids,id))
#define shm_unlock(id)	ipc_unlock(&shm_ids,id)
#define shm_lockall()	ipc_lockall(&shm_ids)
#define shm_unlockall()	ipc_unlockall(&shm_ids)
#define shm_get(id)	((struct shmid_kernel*)ipc_get(&shm_ids,id))
#define shm_buildid(id, seq) \
	ipc_buildid(&shm_ids, id, seq)
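/*
 * Sketch of the id layout, assuming the usual ipc_buildid() definition
 * of SEQ_MULTIPLIER*seq + id (see ipc/util.h):
 *
 *	effective_id = seq * SEQ_MULTIPLIER + slot;	/* shm_buildid() */
 *	slot	     = effective_id % SEQ_MULTIPLIER;	/* shm_lock()    */
 *	seq	     = effective_id / SEQ_MULTIPLIER;	/* shm_checkid() */
 *
 * A stale id whose slot has since been recycled then fails
 * shm_checkid() instead of silently addressing the new segment.
 */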
static int newseg (key_t key, int shmflg, size_t size);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
#endif
size_t	shm_ctlmax = SHMMAX;
size_t	shm_ctlall = SHMALL;
int	shm_ctlmni = SHMMNI;

static int shm_tot; /* total number of shared memory pages */

void __init shm_init (void)
{
	ipc_init_ids(&shm_ids, 1);
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sysvipc/shm", 0, 0, sysvipc_shm_read_proc, NULL);
#endif
}
static inline int shm_checkid(struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids,&s->shm_perm,id))
		return -EIDRM;
	return 0;
}

static inline struct shmid_kernel *shm_rmid(int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids,id);
}

static inline int shm_addid(struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni+1);
}
static inline void shm_inc (int id) {
	struct shmid_kernel *shp;

	if(!(shp = shm_lock(id)))
		BUG();
	shp->shm_atim = CURRENT_TIME;
	shp->shm_lprid = current->pid;
	shp->shm_nattch++;
	shm_unlock(id);
}
/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
	shm_inc (shmd->vm_file->f_dentry->d_inode->i_ino);
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.sem locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy (struct shmid_kernel *shp)
{
	shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid (shp->id);
	shm_unlock(shp->id);
	shmem_lock(shp->shm_file, 0);
	fput (shp->shm_file);
	kfree (shp);
}
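/*
 * Sketch of the expected caller pattern, inferred from the locking
 * comment above (not a verbatim copy of any one call site):
 *
 *	down(&shm_ids.sem);
 *	if ((shp = shm_lock(id)) != NULL) {
 *		if (shp->shm_nattch == 0)
 *			shm_destroy(shp);	/* unlocks and frees shp */
 *		else
 *			shm_unlock(id);
 *	}
 *	up(&shm_ids.sem);
 */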
/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
	struct file * file = shmd->vm_file;
	int id = file->f_dentry->d_inode->i_ino;
	struct shmid_kernel *shp;

	down (&shm_ids.sem);
	/* remove from the list of attaches of the shm segment */
	if(!(shp = shm_lock(id)))
		BUG();
	shp->shm_lprid = current->pid;
	shp->shm_dtim = CURRENT_TIME;
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_flags & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(id);
	up (&shm_ids.sem);
}
static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	UPDATE_ATIME(file->f_dentry->d_inode);
	vma->vm_ops = &shm_vm_ops;
	if (!(vma->vm_flags & VM_WRITE))
		vma->vm_flags &= ~VM_MAYWRITE;
	shm_inc(file->f_dentry->d_inode->i_ino);
	return 0;
}

static struct file_operations shm_file_operations = {
	mmap:	shm_mmap
};

static struct vm_operations_struct shm_vm_ops = {
	open:	shm_open,	/* callback for a new vm-area open */
	close:	shm_close,	/* callback for when the vm-area is released */
	nopage:	shmem_nopage,
};
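/*
 * Attach bookkeeping in one place (summary, not new mechanism):
 * shm_mmap() bumps shm_nattch via shm_inc() on the initial attach,
 * shm_open() does the same for every copy a fork() makes, and
 * shm_close() drops the count on unmap, destroying a SHM_DEST
 * segment once the last attach is gone.
 */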
static int newseg (key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;

	if (size < SHMMIN || size > shm_ctlmax)
		return -EINVAL;
	if (shm_tot + numpages >= shm_ctlall)
		return -ENOSPC;

	shp = (struct shmid_kernel *) kmalloc (sizeof (*shp), GFP_USER);
	if (!shp)
		return -ENOMEM;
	sprintf (name, "SYSV%08x", key);
	file = shmem_file_setup(name, size);
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(shp);
	if(id == -1)
		goto no_id;
	shp->shm_perm.key = key;
	shp->shm_flags = (shmflg & S_IRWXUGO);
	shp->shm_cprid = current->pid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = CURRENT_TIME;
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(id,shp->shm_perm.seq);
	shp->shm_file = file;
	file->f_dentry->d_inode->i_ino = shp->id;
	file->f_op = &shm_file_operations;
	shm_tot += numpages;
	shm_unlock (id);
	return shp->id;

no_id:
	fput(file);
no_file:
	kfree(shp);
	return error;
}
asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;

	down(&shm_ids.sem);
	if (key == IPC_PRIVATE) {
		err = newseg(key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(id);
		if(shp==NULL)
			BUG();
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else
			err = shm_buildid(id, shp->shm_perm.seq);
		shm_unlock(id);
	}
	up(&shm_ids.sem);
	return err;
}
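/*
 * User-space view (hedged sketch, assuming the standard sys/shm.h
 * API): the branches above map onto the usual shmget() idioms, e.g.
 *
 *	int id1 = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	int id2 = shmget(0x1234, 4096, IPC_CREAT | IPC_EXCL | 0600);
 *	int id3 = shmget(0x1234, 4096, 0600);	/* find existing key */
 *
 * id2 fails with EEXIST if the key is already in use; id3 fails with
 * EINVAL if the existing segment is smaller than the requested size,
 * matching the checks in the final else branch.
 */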
static inline unsigned long copy_shmid_to_user(void *buf, struct shmid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_flags;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_flags;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}
static inline unsigned long copy_shminfo_to_user(void *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if(in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
static void shm_get_stat (unsigned long *rss, unsigned long *swp)
{
	struct shmem_inode_info *info;
	int i;

	*rss = 0;
	*swp = 0;

	for(i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel* shp;
		struct inode * inode;

		shp = shm_get(i);
		if(shp == NULL)
			continue;
		inode = shp->shm_file->f_dentry->d_inode;
		info = SHMEM_I(inode);
		spin_lock (&info->lock);
		*rss += inode->i_mapping->nrpages;
		*swp += info->swapped;
		spin_unlock (&info->lock);
	}
}
asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo,0,sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
		shminfo.shmmax = shm_ctlmax;
		shminfo.shmall = shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if(copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err = shm_ids.max_id;
		if(err<0)
			err = 0;
		return err;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info,0,sizeof(shm_info));
		down(&shm_ids.sem);
		shm_lockall();
		shm_info.used_ids = shm_ids.in_use;
		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids.max_id;
		shm_unlockall();
		up(&shm_ids.sem);
		if(copy_to_user (buf, &shm_info, sizeof(shm_info)))
			return -EFAULT;

		return err < 0 ? 0 : err;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(shmid);
		if(shp==NULL)
			return -EINVAL;
		if(cmd==SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids.max_id)
				goto out_unlock;
			result = shm_buildid(shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(shp,shmid);
			if(err)
				goto out_unlock;
			result = 0;
		}
		err = -EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shmid);
		if(copy_shmid_to_user (buf, &tbuf, version))
			return -EFAULT;
		return result;
	}
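	/*
	 * Note the asymmetry (sketch of the user-visible contract):
	 * IPC_STAT takes an effective shm id and returns 0, while
	 * SHM_STAT takes a raw slot index and returns the effective id
	 * built from it, e.g.
	 *
	 *	struct shmid_ds ds;
	 *	int id = shmctl(0, SHM_STAT, &ds);  /* stat slot 0, get id */
	 *
	 * which is how ipcs(8)-style tools enumerate segments.
	 */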
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
/* Allow superuser to lock segment in memory */
/* Should the pages be faulted in here or leave it to user? */
/* need to determine interaction with current->swappable */
		if (!capable(CAP_IPC_LOCK))
			return -EPERM;

		shp = shm_lock(shmid);
		if(shp==NULL)
			return -EINVAL;
		err = shm_checkid(shp,shmid);
		if(err)
			goto out_unlock;
		if(cmd==SHM_LOCK) {
			shmem_lock(shp->shm_file, 1);
			shp->shm_flags |= SHM_LOCKED;
		} else {
			shmem_lock(shp->shm_file, 0);
			shp->shm_flags &= ~SHM_LOCKED;
		}
		err = 0;
		goto out_unlock;
	}
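	/*
	 * User-space sketch (assuming the standard sys/shm.h API):
	 * pinning a segment's pages needs CAP_IPC_LOCK, e.g.
	 *
	 *	shmctl(id, SHM_LOCK, NULL);	/* no shmid_ds needed */
	 *	shmctl(id, SHM_UNLOCK, NULL);
	 */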
	case IPC_RMID:
	{
		/*
		 *	We cannot simply remove the file. The SVID states
		 *	that the block remains until the last person
		 *	detaches from it, then is deleted. A shmat() on
		 *	an RMID segment is legal in older Linux and if
		 *	we change it apps break...
		 *
		 *	Instead we set a destroyed flag, and then blow
		 *	the name away when the usage hits zero.
		 */
		down(&shm_ids.sem);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if(err)
			goto out_unlock_up;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}
		if (shp->shm_nattch){
			shp->shm_flags |= SHM_DEST;
			/* Do not find it any more */
			shp->shm_perm.key = IPC_PRIVATE;
			shm_unlock(shmid);
		} else
			shm_destroy (shp);
		err = 0;
		goto out_up;
	}
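	/*
	 * User-space sketch (assuming the standard sys/shm.h API):
	 * IPC_RMID does not invalidate existing attaches, e.g.
	 *
	 *	void *p = shmat(id, NULL, 0);
	 *	shmctl(id, IPC_RMID, NULL);  /* key gone, SHM_DEST set */
	 *	/* p stays valid until shmdt(p) frees the segment */
	 */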
	case IPC_SET:
	{
		if(copy_shmid_from_user (&setbuf, buf, version))
			return -EFAULT;
		down(&shm_ids.sem);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if(shp==NULL)
			goto out_up;
		err = shm_checkid(shp,shmid);
		if(err)
			goto out_unlock_up;
		err = -EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_flags = (shp->shm_flags & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = CURRENT_TIME;
		break;
	}
	default:
		return -EINVAL;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shmid);
out_up:
	up(&shm_ids.sem);
	return err;
out_unlock:
	shm_unlock(shmid);
	return err;
}
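/*
 * User-space sketch (assuming the standard sys/shm.h API): IPC_SET
 * only honours uid, gid and the low nine mode bits, e.g.
 *
 *	struct shmid_ds ds;
 *	shmctl(id, IPC_STAT, &ds);
 *	ds.shm_perm.mode = 0600;	/* drop group/other access */
 *	shmctl(id, IPC_SET, &ds);
 */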
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 */
asmlinkage long sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int    err;
	unsigned long flags;
	unsigned long prot;
	unsigned long o_flags;
	int acc_mode;
	void *user_addr;

	if (shmid < 0)
		return -EINVAL;

	if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
				return -EINVAL;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			return -EINVAL;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		o_flags = O_RDONLY;
		acc_mode = S_IRUGO;
	} else {
		prot = PROT_READ | PROT_WRITE;
		o_flags = O_RDWR;
		acc_mode = S_IRUGO | S_IWUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	shp = shm_lock(shmid);
	if(shp == NULL)
		return -EINVAL;
	err = shm_checkid(shp,shmid);
	if (err) {
		shm_unlock(shmid);
		return err;
	}
	if (ipcperms(&shp->shm_perm, acc_mode)) {
		shm_unlock(shmid);
		return -EACCES;
	}
	file = shp->shm_file;
	size = file->f_dentry->d_inode->i_size;
	shp->shm_nattch++;
	shm_unlock(shmid);

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		user_addr = ERR_PTR(-EINVAL);
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = (void*) do_mmap (file, addr, size, prot, flags, 0);

invalid:
	up_write(&current->mm->mmap_sem);

	down (&shm_ids.sem);
	if(!(shp = shm_lock(shmid)))
		BUG();
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_flags & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shmid);
	up (&shm_ids.sem);

	*raddr = (unsigned long) user_addr;
	err = 0;
	if (IS_ERR(user_addr))
		err = PTR_ERR(user_addr);
	return err;
}
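/*
 * User-space sketch (assuming the standard sys/shm.h API): the addr
 * handling above corresponds to
 *
 *	void *p = shmat(id, NULL, 0);		/* kernel picks address  */
 *	void *q = shmat(id, hint, SHM_RND);	/* hint rounded to SHMLBA */
 *	void *r = shmat(id, unaligned, 0);	/* fails with EINVAL      */
 *
 * and SHM_RDONLY maps the segment PROT_READ only.
 */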
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt (char *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *shmd, *shmdnext;
	int retval = -EINVAL;

	down_write(&mm->mmap_sem);
	for (shmd = mm->mmap; shmd; shmd = shmdnext) {
		shmdnext = shmd->vm_next;
		if (shmd->vm_ops == &shm_vm_ops
		    && shmd->vm_start - (shmd->vm_pgoff << PAGE_SHIFT) == (ulong) shmaddr) {
			do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start);
			retval = 0;
		}
	}
	up_write(&mm->mmap_sem);
	return retval;
}
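/*
 * Note on the matching rule above (sketch): vm_start - (vm_pgoff <<
 * PAGE_SHIFT) recovers the address the segment was attached at, so an
 * attach whose head was partially munmap()ed is still found through
 * its tail vma; every matching vma is unmapped, and shm_close() does
 * the nattch/SHM_DEST bookkeeping.
 */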
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t begin = 0;
	int i, len = 0;

	down(&shm_ids.sem);
	len += sprintf(buffer, "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n");

	for(i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel* shp;

		shp = shm_lock(i);
		if(shp!=NULL) {
#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
			char *format;

			if (sizeof(size_t) <= sizeof(int))
				format = SMALL_STRING;
			else
				format = BIG_STRING;
			len += sprintf(buffer + len, format,
				shp->shm_perm.key, shm_buildid(i, shp->shm_perm.seq),
				shp->shm_flags, shp->shm_segsz,
				shp->shm_cprid, shp->shm_lprid, shp->shm_nattch,
				shp->shm_perm.uid, shp->shm_perm.gid,
				shp->shm_perm.cuid, shp->shm_perm.cgid,
				shp->shm_atim, shp->shm_dtim, shp->shm_ctim);
			shm_unlock(i);

			pos += len;
			if(pos < offset) {
				len = 0;
				begin = pos;
			}
			if(pos > offset + length)
				goto done;
		}
	}
	*eof = 1;
done:
	up(&shm_ids.sem);
	*start = buffer + (offset - begin);
	len -= (offset - begin);
	if(len > length)
		len = length;
	if(len < 0)
		len = 0;
	return len;
}
#endif