/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *       Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h>

#include "util.h"

struct shmid_kernel /* private to the kernel */
{
        struct kern_ipc_perm    shm_perm;
        struct file *           shm_file;
        int                     id;
        unsigned long           shm_nattch;
        unsigned long           shm_segsz;
        time_t                  shm_atim;
        time_t                  shm_dtim;
        time_t                  shm_ctim;
        pid_t                   shm_cprid;
        pid_t                   shm_lprid;
};

#define shm_flags       shm_perm.mode

static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids shm_ids;

#define shm_lock(id)    ((struct shmid_kernel*)ipc_lock(&shm_ids,id))
#define shm_unlock(id)  ipc_unlock(&shm_ids,id)
#define shm_lockall()   ipc_lockall(&shm_ids)
#define shm_unlockall() ipc_unlockall(&shm_ids)
#define shm_get(id)     ((struct shmid_kernel*)ipc_get(&shm_ids,id))
#define shm_buildid(id, seq) \
        ipc_buildid(&shm_ids, id, seq)

static int newseg (key_t key, int shmflg, size_t size);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
#endif

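/*
 * Run-time tunable limits (sysctl'able, see header comment above):
 * shm_ctlmax - maximum size of a single segment, in bytes,
 * shm_ctlall - system-wide limit on shared memory, in pages,
 * shm_ctlmni - maximum number of segments.
 */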
size_t  shm_ctlmax = SHMMAX;
size_t  shm_ctlall = SHMALL;
int     shm_ctlmni = SHMMNI;

static int shm_tot; /* total number of shared memory pages */

void __init shm_init (void)
{
        ipc_init_ids(&shm_ids, 1);
#ifdef CONFIG_PROC_FS
        create_proc_read_entry("sysvipc/shm", 0, 0, sysvipc_shm_read_proc, NULL);
#endif
}

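/*
 * Thin wrappers around the generic ipc id helpers declared in util.h,
 * specialised for the shm id namespace.
 */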
static inline int shm_checkid(struct shmid_kernel *s, int id)
{
        if (ipc_checkid(&shm_ids,&s->shm_perm,id))
                return -EIDRM;
        return 0;
}

static inline struct shmid_kernel *shm_rmid(int id)
{
        return (struct shmid_kernel *)ipc_rmid(&shm_ids,id);
}

static inline int shm_addid(struct shmid_kernel *shp)
{
        return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni+1);
}

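/*
 * Account for a new attach to segment @id: update the attach time,
 * record the attaching pid and bump the attach count.  Called from
 * shm_mmap() and, via shm_open(), on fork.
 */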
static inline void shm_inc (int id) {
        struct shmid_kernel *shp;

        if(!(shp = shm_lock(id)))
                BUG();
        shp->shm_atim = CURRENT_TIME;
        shp->shm_lprid = current->pid;
        shp->shm_nattch++;
        shm_unlock(id);
}

/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
        shm_inc (shmd->vm_file->f_dentry->d_inode->i_ino);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.sem locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy (struct shmid_kernel *shp)
{
        shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid (shp->id);
        shm_unlock(shp->id);
        shmem_lock(shp->shm_file, 0);
        fput (shp->shm_file);
        kfree (shp);
}

/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
        struct file * file = shmd->vm_file;
        int id = file->f_dentry->d_inode->i_ino;
        struct shmid_kernel *shp;

        down (&shm_ids.sem);
        /* remove from the list of attaches of the shm segment */
        if(!(shp = shm_lock(id)))
                BUG();
        shp->shm_lprid = current->pid;
        shp->shm_dtim = CURRENT_TIME;
        shp->shm_nattch--;
        if(shp->shm_nattch == 0 &&
           shp->shm_flags & SHM_DEST)
                shm_destroy (shp);
        else
                shm_unlock(id);
        up (&shm_ids.sem);
}

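/*
 * mmap() handler for the shm file: install shm_vm_ops so that the
 * open/close attach accounting and shmem_nopage() faulting are used,
 * and count the new mapping as an attach.
 */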
static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
        UPDATE_ATIME(file->f_dentry->d_inode);
        vma->vm_ops = &shm_vm_ops;
        shm_inc(file->f_dentry->d_inode->i_ino);
        return 0;
}

static struct file_operations shm_file_operations = {
        mmap:   shm_mmap
};

static struct vm_operations_struct shm_vm_ops = {
        open:   shm_open,       /* callback for a new vm-area open */
        close:  shm_close,      /* callback for when the vm-area is released */
        nopage: shmem_nopage,
};

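/*
 * newseg - allocate a new, unattached segment
 *
 * Creates the backing shmem file, charges its pages against shm_tot and
 * installs the new shmid_kernel in shm_ids.  Called with shm_ids.sem held.
 * Returns the segment id or a negative errno.
 */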
static int newseg (key_t key, int shmflg, size_t size)
{
        int error;
        struct shmid_kernel *shp;
        int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
        struct file * file;
        char name[13];
        int id;

        if (size < SHMMIN || size > shm_ctlmax)
                return -EINVAL;

        if (shm_tot + numpages >= shm_ctlall)
                return -ENOSPC;

        shp = (struct shmid_kernel *) kmalloc (sizeof (*shp), GFP_USER);
        if (!shp)
                return -ENOMEM;
        sprintf (name, "SYSV%08x", key);
        file = shmem_file_setup(name, size);
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        error = -ENOSPC;
        id = shm_addid(shp);
        if(id == -1)
                goto no_id;
        shp->shm_perm.key = key;
        shp->shm_flags = (shmflg & S_IRWXUGO);
        shp->shm_cprid = current->pid;
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = CURRENT_TIME;
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->id = shm_buildid(id,shp->shm_perm.seq);
        shp->shm_file = file;
        file->f_dentry->d_inode->i_ino = shp->id;
        file->f_op = &shm_file_operations;
        shm_tot += numpages;
        shm_unlock (id);
        return shp->id;

no_id:
        fput(file);
no_file:
        kfree(shp);
        return error;
}

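/*
 * shmget(2): look up a segment by key, or create one via newseg(),
 * honouring the IPC_PRIVATE, IPC_CREAT and IPC_EXCL semantics.
 */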
asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
        struct shmid_kernel *shp;
        int err, id = 0;

        down(&shm_ids.sem);
        if (key == IPC_PRIVATE) {
                err = newseg(key, shmflg, size);
        } else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
                if (!(shmflg & IPC_CREAT))
                        err = -ENOENT;
                else
                        err = newseg(key, shmflg, size);
        } else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
                err = -EEXIST;
        } else {
                shp = shm_lock(id);
                if(shp==NULL)
                        BUG();
                if (shp->shm_segsz < size)
                        err = -EINVAL;
                else if (ipcperms(&shp->shm_perm, shmflg))
                        err = -EACCES;
                else
                        err = shm_buildid(id, shp->shm_perm.seq);
                shm_unlock(id);
        }
        up(&shm_ids.sem);
        return err;
}

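/*
 * Copy a shmid64_ds out to userspace, converting to the legacy
 * shmid_ds layout when the caller used the old (IPC_OLD) interface.
 */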
static inline unsigned long copy_shmid_to_user(void *buf, struct shmid64_ds *in, int version)
{
        switch(version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shmid_ds out;

                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz   = in->shm_segsz;
                out.shm_atime   = in->shm_atime;
                out.shm_dtime   = in->shm_dtime;
                out.shm_ctime   = in->shm_ctime;
                out.shm_cpid    = in->shm_cpid;
                out.shm_lpid    = in->shm_lpid;
                out.shm_nattch  = in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

struct shm_setbuf {
        uid_t   uid;
        gid_t   gid;
        mode_t  mode;
};

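/*
 * Read the IPC_SET arguments from userspace into a shm_setbuf,
 * accepting either the new (IPC_64) or the old shmid_ds layout.
 */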
static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void *buf, int version)
{
        switch(version) {
        case IPC_64:
            {
                struct shmid64_ds tbuf;

                if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
                        return -EFAULT;

                out->uid        = tbuf.shm_perm.uid;
                out->gid        = tbuf.shm_perm.gid;
                out->mode       = tbuf.shm_flags;

                return 0;
            }
        case IPC_OLD:
            {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->uid        = tbuf_old.shm_perm.uid;
                out->gid        = tbuf_old.shm_perm.gid;
                out->mode       = tbuf_old.shm_flags;

                return 0;
            }
        default:
                return -EINVAL;
        }
}

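/*
 * Copy the shminfo limits out to userspace, clamping shmmax and
 * converting to the old shminfo layout for IPC_OLD callers.
 */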
static inline unsigned long copy_shminfo_to_user(void *buf, struct shminfo64 *in, int version)
{
        switch(version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shminfo out;

                if(in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin      = in->shmmin;
                out.shmmni      = in->shmmni;
                out.shmseg      = in->shmseg;
                out.shmall      = in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

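/*
 * shm_get_stat - sum resident and swapped pages over all segments,
 * for SHM_INFO.  Caller holds shm_ids.sem and has the ids locked.
 */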
static void shm_get_stat (unsigned long *rss, unsigned long *swp)
{
        struct shmem_inode_info *info;
        int i;

        *rss = 0;
        *swp = 0;

        for(i = 0; i <= shm_ids.max_id; i++) {
                struct shmid_kernel* shp;
                struct inode * inode;

                shp = shm_get(i);
                if(shp == NULL)
                        continue;
                inode = shp->shm_file->f_dentry->d_inode;
                info = SHMEM_I(inode);
                spin_lock (&info->lock);
                *rss += inode->i_mapping->nrpages;
                *swp += info->swapped;
                spin_unlock (&info->lock);
        }
}

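/*
 * shmctl(2): segment control.  Handles the read-only queries
 * (IPC_INFO, SHM_INFO, IPC_STAT, SHM_STAT), memory locking
 * (SHM_LOCK/SHM_UNLOCK), removal (IPC_RMID) and owner/permission
 * updates (IPC_SET).
 */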
asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds *buf)
{
        struct shm_setbuf setbuf;
        struct shmid_kernel *shp;
        int err, version;

        if (cmd < 0 || shmid < 0)
                return -EINVAL;

        version = ipc_parse_version(&cmd);

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo64 shminfo;

                memset(&shminfo,0,sizeof(shminfo));
                shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
                shminfo.shmmax = shm_ctlmax;
                shminfo.shmall = shm_ctlall;

                shminfo.shmmin = SHMMIN;
                if(copy_shminfo_to_user (buf, &shminfo, version))
                        return -EFAULT;
                /* reading an integer is always atomic */
                err= shm_ids.max_id;
                if(err<0)
                        err = 0;
                return err;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                memset(&shm_info,0,sizeof(shm_info));
                down(&shm_ids.sem);
                shm_lockall();
                shm_info.used_ids = shm_ids.in_use;
                shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
                shm_info.shm_tot = shm_tot;
                shm_info.swap_attempts = 0;
                shm_info.swap_successes = 0;
                err = shm_ids.max_id;
                shm_unlockall();
                up(&shm_ids.sem);
                if(copy_to_user (buf, &shm_info, sizeof(shm_info)))
                        return -EFAULT;

                return err < 0 ? 0 : err;
        }
        case SHM_STAT:
        case IPC_STAT:
        {
                struct shmid64_ds tbuf;
                int result;
                memset(&tbuf, 0, sizeof(tbuf));
                shp = shm_lock(shmid);
                if(shp==NULL)
                        return -EINVAL;
                if(cmd==SHM_STAT) {
                        err = -EINVAL;
                        if (shmid > shm_ids.max_id)
                                goto out_unlock;
                        result = shm_buildid(shmid, shp->shm_perm.seq);
                } else {
                        err = shm_checkid(shp,shmid);
                        if(err)
                                goto out_unlock;
                        result = 0;
                }
                err=-EACCES;
                if (ipcperms (&shp->shm_perm, S_IRUGO))
                        goto out_unlock;
                kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
                tbuf.shm_segsz  = shp->shm_segsz;
                tbuf.shm_atime  = shp->shm_atim;
                tbuf.shm_dtime  = shp->shm_dtim;
                tbuf.shm_ctime  = shp->shm_ctim;
                tbuf.shm_cpid   = shp->shm_cprid;
                tbuf.shm_lpid   = shp->shm_lprid;
                tbuf.shm_nattch = shp->shm_nattch;
                shm_unlock(shmid);
                if(copy_shmid_to_user (buf, &tbuf, version))
                        return -EFAULT;
                return result;
        }
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
/* Allow superuser to lock segment in memory */
/* Should the pages be faulted in here or leave it to user? */
/* need to determine interaction with current->swappable */
                if (!capable(CAP_IPC_LOCK))
                        return -EPERM;

                shp = shm_lock(shmid);
                if(shp==NULL)
                        return -EINVAL;
                err = shm_checkid(shp,shmid);
                if(err)
                        goto out_unlock;
                if(cmd==SHM_LOCK) {
                        shmem_lock(shp->shm_file, 1);
                        shp->shm_flags |= SHM_LOCKED;
                } else {
                        shmem_lock(shp->shm_file, 0);
                        shp->shm_flags &= ~SHM_LOCKED;
                }
                shm_unlock(shmid);
                return err;
        }
        case IPC_RMID:
        {
                /*
                 *      We cannot simply remove the file. The SVID states
                 *      that the block remains until the last person
                 *      detaches from it, then is deleted. A shmat() on
                 *      an RMID segment is legal in older Linux and if
                 *      we change it apps break...
                 *
                 *      Instead we set a destroyed flag, and then blow
                 *      the name away when the usage hits zero.
                 */
                down(&shm_ids.sem);
                shp = shm_lock(shmid);
                err = -EINVAL;
                if (shp == NULL)
                        goto out_up;
                err = shm_checkid(shp, shmid);
                if(err)
                        goto out_unlock_up;
                if (current->euid != shp->shm_perm.uid &&
                    current->euid != shp->shm_perm.cuid &&
                    !capable(CAP_SYS_ADMIN)) {
                        err=-EPERM;
                        goto out_unlock_up;
                }
                if (shp->shm_nattch){
                        shp->shm_flags |= SHM_DEST;
                        /* Do not find it any more */
                        shp->shm_perm.key = IPC_PRIVATE;
                        shm_unlock(shmid);
                } else
                        shm_destroy (shp);
                up(&shm_ids.sem);
                return err;
        }

        case IPC_SET:
        {
                if(copy_shmid_from_user (&setbuf, buf, version))
                        return -EFAULT;
                down(&shm_ids.sem);
                shp = shm_lock(shmid);
                err=-EINVAL;
                if(shp==NULL)
                        goto out_up;
                err = shm_checkid(shp,shmid);
                if(err)
                        goto out_unlock_up;
                err=-EPERM;
                if (current->euid != shp->shm_perm.uid &&
                    current->euid != shp->shm_perm.cuid &&
                    !capable(CAP_SYS_ADMIN)) {
                        goto out_unlock_up;
                }

                shp->shm_perm.uid = setbuf.uid;
                shp->shm_perm.gid = setbuf.gid;
                shp->shm_flags = (shp->shm_flags & ~S_IRWXUGO)
                        | (setbuf.mode & S_IRWXUGO);
                shp->shm_ctim = CURRENT_TIME;
                break;
        }

        default:
                return -EINVAL;
        }

        err = 0;
out_unlock_up:
        shm_unlock(shmid);
out_up:
        up(&shm_ids.sem);
        return err;
out_unlock:
        shm_unlock(shmid);
        return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 */
asmlinkage long sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_kernel *shp;
        unsigned long addr;
        unsigned long size;
        struct file * file;
        int    err;
        unsigned long flags;
        unsigned long prot;
        unsigned long o_flags;
        int acc_mode;
        void *user_addr;

        if (shmid < 0)
                return -EINVAL;

        if ((addr = (ulong)shmaddr)) {
                if (addr & (SHMLBA-1)) {
                        if (shmflg & SHM_RND)
                                addr &= ~(SHMLBA-1);       /* round down */
                        else
                                return -EINVAL;
                }
                flags = MAP_SHARED | MAP_FIXED;
        } else {
                if ((shmflg & SHM_REMAP))
                        return -EINVAL;

                flags = MAP_SHARED;
        }

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                o_flags = O_RDONLY;
                acc_mode = S_IRUGO;
        } else {
                prot = PROT_READ | PROT_WRITE;
                o_flags = O_RDWR;
                acc_mode = S_IRUGO | S_IWUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        shp = shm_lock(shmid);
        if(shp == NULL)
                return -EINVAL;
        err = shm_checkid(shp,shmid);
        if (err) {
                shm_unlock(shmid);
                return err;
        }
        if (ipcperms(&shp->shm_perm, acc_mode)) {
                shm_unlock(shmid);
                return -EACCES;
        }
        file = shp->shm_file;
        size = file->f_dentry->d_inode->i_size;
        shp->shm_nattch++;
        shm_unlock(shmid);

        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                user_addr = ERR_PTR(-EINVAL);
                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
                /*
                 * If shm segment goes below stack, make sure there is some
                 * space left for the stack to grow (at least 4 pages).
                 */
                if (addr < current->mm->start_stack &&
                    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
                        goto invalid;
        }

        user_addr = (void*) do_mmap (file, addr, size, prot, flags, 0);

invalid:
        up_write(&current->mm->mmap_sem);

        down (&shm_ids.sem);
        if(!(shp = shm_lock(shmid)))
                BUG();
        shp->shm_nattch--;
        if(shp->shm_nattch == 0 &&
           shp->shm_flags & SHM_DEST)
                shm_destroy (shp);
        else
                shm_unlock(shmid);
        up (&shm_ids.sem);

        *raddr = (unsigned long) user_addr;
        err = 0;
        if (IS_ERR(user_addr))
                err = PTR_ERR(user_addr);
        return err;

}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt (char *shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *shmd, *shmdnext;
        int retval = -EINVAL;

        down_write(&mm->mmap_sem);
        for (shmd = mm->mmap; shmd; shmd = shmdnext) {
                shmdnext = shmd->vm_next;
                if (shmd->vm_ops == &shm_vm_ops
                    && shmd->vm_start - (shmd->vm_pgoff << PAGE_SHIFT) == (ulong) shmaddr) {
                        do_munmap(mm, shmd->vm_start, shmd->vm_end - shmd->vm_start);
                        retval = 0;
                }
        }
        up_write(&mm->mmap_sem);
        return retval;
}

#ifdef CONFIG_PROC_FS
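/*
 * /proc/sysvipc/shm: dump one line per segment through the read_proc
 * interface.  The offset/length/start bookkeeping below implements the
 * usual partial-read protocol for read_proc handlers.
 */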
static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
        off_t pos = 0;
        off_t begin = 0;
        int i, len = 0;

        down(&shm_ids.sem);
        len += sprintf(buffer, "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n");

        for(i = 0; i <= shm_ids.max_id; i++) {
                struct shmid_kernel* shp;

                shp = shm_lock(i);
                if(shp!=NULL) {
#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
                        char *format;

                        if (sizeof(size_t) <= sizeof(int))
                                format = SMALL_STRING;
                        else
                                format = BIG_STRING;
                        len += sprintf(buffer + len, format,
                                shp->shm_perm.key,
                                shm_buildid(i, shp->shm_perm.seq),
                                shp->shm_flags,
                                shp->shm_segsz,
                                shp->shm_cprid,
                                shp->shm_lprid,
                                shp->shm_nattch,
                                shp->shm_perm.uid,
                                shp->shm_perm.gid,
                                shp->shm_perm.cuid,
                                shp->shm_perm.cgid,
                                shp->shm_atim,
                                shp->shm_dtim,
                                shp->shm_ctim);
                        shm_unlock(i);

                        pos += len;
                        if(pos < offset) {
                                len = 0;
                                begin = pos;
                        }
                        if(pos > offset + length)
                                goto done;
                }
        }
        *eof = 1;
done:
        up(&shm_ids.sem);
        *start = buffer + (offset - begin);
        len -= (offset - begin);
        if(len > length)
                len = length;
        if(len < 0)
                len = 0;
        return len;
}
#endif