[linux-2.4.21-pre4.git] / kernel / fork.c
/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/namespace.h>
#include <linux/personality.h>
#include <linux/compiler.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>

/* The idle threads do not count.. */
int nr_threads;
int nr_running;

int max_threads;
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int last_pid;

struct task_struct *pidhash[PIDHASH_SZ];

void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	wq_write_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	wq_write_unlock_irqrestore(&q->lock, flags);
}

void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	wq_write_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	wq_write_unlock_irqrestore(&q->lock, flags);
}

void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
{
	unsigned long flags;

	wq_write_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	wq_write_unlock_irqrestore(&q->lock, flags);
}

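/*
 * A rough usage sketch: non-exclusive waiters added by add_wait_queue()
 * sit at the head of the queue and are all woken by wake_up(); exclusive
 * waiters added by add_wait_queue_exclusive() carry WQ_FLAG_EXCLUSIVE,
 * sit at the tail, and wake_up() stops after waking one of them, which
 * avoids waking a whole herd of sleepers for a single event.  A typical
 * wait loop (queue_head and condition are placeholders) looks like:
 *
 *	DECLARE_WAITQUEUE(wait, current);
 *
 *	add_wait_queue_exclusive(&queue_head, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&queue_head, &wait);
 */
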
void __init fork_init(unsigned long mempages)
{
	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (THREAD_SIZE/PAGE_SIZE) / 8;

	init_task.rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
}
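
/*
 * Worked example (assuming an i386-style configuration where PAGE_SIZE is
 * 4 KB and THREAD_SIZE is two pages): with 128 MB of RAM, mempages is
 * 32768, so max_threads = 32768 / 2 / 8 = 2048 and the init task's
 * RLIMIT_NPROC defaults to 1024 processes per user.
 */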

/* Protects next_safe and last_pid. */
spinlock_t lastpid_lock = SPIN_LOCK_UNLOCKED;

static int get_pid(unsigned long flags)
{
	static int next_safe = PID_MAX;
	struct task_struct *p;
	int pid, beginpid;

	if (flags & CLONE_PID)
		return current->pid;

	spin_lock(&lastpid_lock);
	beginpid = last_pid;
	if((++last_pid) & 0xffff8000) {
		last_pid = 300;		/* Skip daemons etc. */
		goto inside;
	}
	if(last_pid >= next_safe) {
inside:
		next_safe = PID_MAX;
		read_lock(&tasklist_lock);
	repeat:
		for_each_task(p) {
			if(p->pid == last_pid	||
			   p->pgrp == last_pid	||
			   p->tgid == last_pid	||
			   p->session == last_pid) {
				if(++last_pid >= next_safe) {
					if(last_pid & 0xffff8000)
						last_pid = 300;
					next_safe = PID_MAX;
				}
				if(unlikely(last_pid == beginpid))
					goto nomorepids;
				goto repeat;
			}
			if(p->pid > last_pid && next_safe > p->pid)
				next_safe = p->pid;
			if(p->pgrp > last_pid && next_safe > p->pgrp)
				next_safe = p->pgrp;
			if(p->tgid > last_pid && next_safe > p->tgid)
				next_safe = p->tgid;
			if(p->session > last_pid && next_safe > p->session)
				next_safe = p->session;
		}
		read_unlock(&tasklist_lock);
	}
	pid = last_pid;
	spin_unlock(&lastpid_lock);

	return pid;

nomorepids:
	read_unlock(&tasklist_lock);
	spin_unlock(&lastpid_lock);
	return 0;
}

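/*
 * In short: PIDs are handed out by scanning upward from last_pid within
 * 300..PID_MAX-1; the 0xffff8000 test is the wrap check against PID_MAX
 * (0x8000).  next_safe caches the smallest pid/pgrp/tgid/session id above
 * last_pid that is already in use, so the expensive for_each_task() walk
 * is only repeated once last_pid catches up to it.  If the scan wraps all
 * the way around to beginpid, every id is taken and 0 is returned.
 */
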
static inline int dup_mmap(struct mm_struct * mm)
{
	struct vm_area_struct * mpnt, *tmp, **pprev;
	int retval;

	flush_cache_mm(current->mm);
	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->map_count = 0;
	mm->rss = 0;
	mm->cpu_vm_mask = 0;
	mm->swap_address = 0;
	pprev = &mm->mmap;

	/*
	 * Add it to the mmlist after the parent.
	 * Doing it this way means that we can order the list,
	 * and fork() won't mess up the ordering significantly.
	 * Add it first so that swapoff can see any swap entries.
	 */
	spin_lock(&mmlist_lock);
	list_add(&mm->mmlist, &current->mm->mmlist);
	mmlist_nr++;
	spin_unlock(&mmlist_lock);

	for (mpnt = current->mm->mmap ; mpnt ; mpnt = mpnt->vm_next) {
		struct file *file;

		retval = -ENOMEM;
		if(mpnt->vm_flags & VM_DONTCOPY)
			continue;
		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_dentry->d_inode;
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);

			/* insert tmp into the share list, just after mpnt */
			spin_lock(&inode->i_mapping->i_shared_lock);
			if((tmp->vm_next_share = mpnt->vm_next_share) != NULL)
				mpnt->vm_next_share->vm_pprev_share =
					&tmp->vm_next_share;
			mpnt->vm_next_share = tmp;
			tmp->vm_pprev_share = &mpnt->vm_next_share;
			spin_unlock(&inode->i_mapping->i_shared_lock);
		}

		/*
		 * Link in the new vma and copy the page table entries:
		 * link in first so that swapoff can see swap entries.
		 */
		spin_lock(&mm->page_table_lock);
		*pprev = tmp;
		pprev = &tmp->vm_next;
		mm->map_count++;
		retval = copy_page_range(mm, current->mm, tmp);
		spin_unlock(&mm->page_table_lock);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto fail_nomem;
	}
	retval = 0;
	build_mmap_rb(mm);

fail_nomem:
	flush_tlb_mm(current->mm);
	return retval;
}
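
/*
 * Note on copy-on-write: dup_mmap() duplicates the vm_area_structs, while
 * copy_page_range() duplicates the page tables underneath them.  For
 * private writable mappings both the parent's and the child's page table
 * entries end up write-protected, so whichever process writes first takes
 * a page fault and gets its own copy of the page; read-only and shared
 * pages are simply reference-counted.
 */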

spinlock_t mmlist_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
int mmlist_nr;

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

static struct mm_struct * mm_init(struct mm_struct * mm)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	mm->page_table_lock = SPIN_LOCK_UNLOCKED;
	mm->pgd = pgd_alloc(mm);
	mm->def_flags = 0;
	if (mm->pgd)
		return mm;
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
	struct mm_struct * mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		return mm_init(mm);
	}
	return NULL;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
inline void __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	pgd_free(mm->pgd);
	destroy_context(mm);
	free_mm(mm);
}

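/*
 * Reference counting in brief: mm_users counts tasks actually using the
 * address space (CLONE_VM in copy_mm() just bumps it), while mm_count
 * counts references to the mm_struct itself -- one on behalf of all the
 * users together plus one for each kernel thread that borrowed it as a
 * lazy-TLB active_mm.  mmput() drops mm_users and, on the last user,
 * unlinks the mm from mmlist and tears down the mappings; mmdrop() then
 * frees the page directory and the mm_struct once mm_count also hits zero.
 */
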
/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	if (atomic_dec_and_lock(&mm->mm_users, &mmlist_lock)) {
		extern struct mm_struct *swap_mm;
		if (swap_mm == mm)
			swap_mm = list_entry(mm->mmlist.next, struct mm_struct, mmlist);
		list_del(&mm->mmlist);
		mmlist_nr--;
		spin_unlock(&mmlist_lock);
		exit_mmap(mm);
		mmdrop(mm);
	}
}

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error or success, whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(void)
{
	struct task_struct *tsk = current;
	struct completion *vfork_done = tsk->vfork_done;

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}
}

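/*
 * vfork() handshake: do_fork() below points the child's vfork_done at a
 * completion that lives on the parent's stack and then blocks in
 * wait_for_completion().  When the child execs or exits it calls
 * mm_release(), which fires that completion and lets the parent continue,
 * so the parent never runs while the child is still borrowing its
 * address space.
 */
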
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
	struct mm_struct * mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->cmin_flt = tsk->cmaj_flt = 0;
	tsk->nswap = tsk->cnswap = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	/* Copy the current MM stuff.. */
	memcpy(mm, oldmm, sizeof(*mm));
	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk,mm))
		goto free_pt;

	down_write(&oldmm->mmap_sem);
	retval = dup_mmap(mm);
	up_write(&oldmm->mmap_sem);

	if (retval)
		goto free_pt;

	/*
	 * child gets a private LDT (if there was an LDT in the parent)
	 */
	copy_segments(tsk, mm);

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

free_pt:
	mmput(mm);
fail_nomem:
	return retval;
}

static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		atomic_set(&fs->count, 1);
		fs->lock = RW_LOCK_UNLOCKED;
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->rootmnt = mntget(old->rootmnt);
		fs->root = dget(old->root);
		fs->pwdmnt = mntget(old->pwdmnt);
		fs->pwd = dget(old->pwd);
		if (old->altroot) {
			fs->altrootmnt = mntget(old->altrootmnt);
			fs->altroot = dget(old->altroot);
		} else {
			fs->altrootmnt = NULL;
			fs->altroot = NULL;
		}
		read_unlock(&old->lock);
	}
	return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	return __copy_fs_struct(old);
}

static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
	if (clone_flags & CLONE_FS) {
		atomic_inc(&current->fs->count);
		return 0;
	}
	tsk->fs = __copy_fs_struct(current->fs);
	if (!tsk->fs)
		return -1;
	return 0;
}

static int count_open_files(struct files_struct *files, int size)
{
	int i;

	/* Find the last open fd */
	for (i = size/(8*sizeof(long)); i > 0; ) {
		if (files->open_fds->fds_bits[--i])
			break;
	}
	i = (i+1) * 8 * sizeof(long);
	return i;
}

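/*
 * copy_files(): with CLONE_FILES the child just takes another reference on
 * the parent's files_struct.  Otherwise a fresh files_struct is allocated
 * with the default NR_OPEN_DEFAULT slots, the open-fds and close-on-exec
 * bitmaps (and the fd array itself) are grown with expand_fdset() and
 * expand_fd_array() if the parent had more descriptors, and every open
 * struct file is shared by bumping its count with get_file().
 */
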
static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
	struct files_struct *oldf, *newf;
	struct file **old_fds, **new_fds;
	int open_files, nfds, size, i, error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	tsk->files = NULL;
	error = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	newf->file_lock     = RW_LOCK_UNLOCKED;
	newf->next_fd       = 0;
	newf->max_fds       = NR_OPEN_DEFAULT;
	newf->max_fdset     = __FD_SETSIZE;
	newf->close_on_exec = &newf->close_on_exec_init;
	newf->open_fds      = &newf->open_fds_init;
	newf->fd            = &newf->fd_array[0];

	/* We don't yet have the oldf readlock, but even if the old
	   fdset gets grown now, we'll only copy up to "size" fds */
	size = oldf->max_fdset;
	if (size > __FD_SETSIZE) {
		newf->max_fdset = 0;
		write_lock(&newf->file_lock);
		error = expand_fdset(newf, size-1);
		write_unlock(&newf->file_lock);
		if (error)
			goto out_release;
	}
	read_lock(&oldf->file_lock);

	open_files = count_open_files(oldf, size);

	/*
	 * Check whether we need to allocate a larger fd array.
	 * Note: we're not a clone task, so the open count won't
	 * change.
	 */
	nfds = NR_OPEN_DEFAULT;
	if (open_files > nfds) {
		read_unlock(&oldf->file_lock);
		newf->max_fds = 0;
		write_lock(&newf->file_lock);
		error = expand_fd_array(newf, open_files-1);
		write_unlock(&newf->file_lock);
		if (error)
			goto out_release;
		nfds = newf->max_fds;
		read_lock(&oldf->file_lock);
	}

	old_fds = oldf->fd;
	new_fds = newf->fd;

	memcpy(newf->open_fds->fds_bits, oldf->open_fds->fds_bits, open_files/8);
	memcpy(newf->close_on_exec->fds_bits, oldf->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f)
			get_file(f);
		*new_fds++ = f;
	}
	read_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (newf->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned thus could use an optimized version */
	memset(new_fds, 0, size);

	if (newf->max_fdset > open_files) {
		int left = (newf->max_fdset-open_files)/8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&newf->open_fds->fds_bits[start], 0, left);
		memset(&newf->close_on_exec->fds_bits[start], 0, left);
	}

	tsk->files = newf;
	error = 0;
out:
	return error;

out_release:
	free_fdset (newf->close_on_exec, newf->max_fdset);
	free_fdset (newf->open_fds, newf->max_fdset);
	kmem_cache_free(files_cachep, newf);
	goto out;
}

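/*
 * copy_sighand(): CLONE_SIGHAND shares the parent's signal_struct by
 * reference, so a sigaction() in either task affects both; a plain fork
 * instead allocates a new signal_struct and memcpy()s the action table,
 * after which the handler tables diverge.
 */
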
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		atomic_inc(&current->sig->count);
		return 0;
	}
	sig = kmem_cache_alloc(sigact_cachep, GFP_KERNEL);
	tsk->sig = sig;
	if (!sig)
		return -1;
	spin_lock_init(&sig->siglock);
	atomic_set(&sig->count, 1);
	memcpy(tsk->sig->action, current->sig->action, sizeof(tsk->sig->action));
	return 0;
}

static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~(PF_SUPERPRIV | PF_USEDFPU);
	new_flags |= PF_FORKNOEXEC;
	if (!(clone_flags & CLONE_PTRACE))
		p->ptrace = 0;
	p->flags = new_flags;
}

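/*
 * For context, a rough sketch of how the architecture stubs reach
 * do_fork() (modelled on the i386 entry points in
 * arch/i386/kernel/process.c; other ports differ in how they pick up the
 * registers and the new stack pointer):
 *
 *	asmlinkage int sys_fork(struct pt_regs regs)
 *	{
 *		return do_fork(SIGCHLD, regs.esp, &regs, 0);
 *	}
 *
 *	asmlinkage int sys_vfork(struct pt_regs regs)
 *	{
 *		return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0);
 *	}
 *
 *	asmlinkage int sys_clone(struct pt_regs regs)
 *	{
 *		unsigned long clone_flags = regs.ebx;
 *		unsigned long newsp = regs.ecx;
 *
 *		if (!newsp)
 *			newsp = regs.esp;
 *		return do_fork(clone_flags, newsp, &regs, 0);
 *	}
 */
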
/*
 *  Ok, this is the main fork-routine. It copies the system process
 * information (task[nr]) and sets up the necessary registers. It also
 * copies the data segment in its entirety.  The "stack_start" and
 * "stack_top" arguments are simply passed along to the platform
 * specific copy_thread() routine.  Most platforms ignore stack_top.
 * For an example that's using stack_top, see
 * arch/ia64/kernel/process.c.
 */
int do_fork(unsigned long clone_flags, unsigned long stack_start,
	    struct pt_regs *regs, unsigned long stack_size)
{
	int retval;
	struct task_struct *p;
	struct completion vfork;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return -EINVAL;

	retval = -EPERM;

	/*
	 * CLONE_PID is only allowed for the initial SMP swapper
	 * calls
	 */
	if (clone_flags & CLONE_PID) {
		if (current->pid)
			goto fork_out;
	}

	retval = -ENOMEM;
	p = alloc_task_struct();
	if (!p)
		goto fork_out;

	*p = *current;

	retval = -EAGAIN;
	/*
	 * Check if we are over our maximum process limit, but be sure to
	 * exclude root. This is needed to make it possible for login and
	 * friends to set the per-user process limit to something lower
	 * than the amount of processes root is running. -- Rik
	 */
	if (atomic_read(&p->user->processes) >= p->rlim[RLIMIT_NPROC].rlim_cur
		      && !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
		goto bad_fork_free;

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);

	/*
	 * Counter increases are protected by
	 * the kernel lock so nr_threads can't
	 * increase under us (but it may decrease).
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	get_exec_domain(p->exec_domain);

	if (p->binfmt && p->binfmt->module)
		__MOD_INC_USE_COUNT(p->binfmt->module);

	p->did_exec = 0;
	p->swappable = 0;
	p->state = TASK_UNINTERRUPTIBLE;

	copy_flags(clone_flags, p);
	p->pid = get_pid(clone_flags);
	if (p->pid == 0 && current->pid != 0)
		goto bad_fork_cleanup;

	p->run_list.next = NULL;
	p->run_list.prev = NULL;

	p->p_cptr = NULL;
	init_waitqueue_head(&p->wait_chldexit);
	p->vfork_done = NULL;
	if (clone_flags & CLONE_VFORK) {
		p->vfork_done = &vfork;
		init_completion(&vfork);
	}
	spin_lock_init(&p->alloc_lock);

	p->sigpending = 0;
	init_sigpending(&p->pending);

	p->it_real_value = p->it_virt_value = p->it_prof_value = 0;
	p->it_real_incr = p->it_virt_incr = p->it_prof_incr = 0;
	init_timer(&p->real_timer);
	p->real_timer.data = (unsigned long) p;

	p->leader = 0;		/* session leadership doesn't inherit */
	p->tty_old_pgrp = 0;
	p->times.tms_utime = p->times.tms_stime = 0;
	p->times.tms_cutime = p->times.tms_cstime = 0;
#ifdef CONFIG_SMP
	{
		int i;
		p->cpus_runnable = ~0UL;
		p->processor = current->processor;
		/* ?? should we just memset this ?? */
		for(i = 0; i < smp_num_cpus; i++)
			p->per_cpu_utime[i] = p->per_cpu_stime[i] = 0;
		spin_lock_init(&p->sigmask_lock);
	}
#endif
	p->lock_depth = -1;		/* -1 = no lock */
	p->start_time = jiffies;

	INIT_LIST_HEAD(&p->local_pages);

	retval = -ENOMEM;
	/* copy all the process information */
	if (copy_files(clone_flags, p))
		goto bad_fork_cleanup;
	if (copy_fs(clone_flags, p))
		goto bad_fork_cleanup_files;
	if (copy_sighand(clone_flags, p))
		goto bad_fork_cleanup_fs;
	if (copy_mm(clone_flags, p))
		goto bad_fork_cleanup_sighand;
	if (copy_namespace(clone_flags, p))
		goto bad_fork_cleanup_mm;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespace;
	p->semundo = NULL;

	/* Our parent execution domain becomes the current domain.
	   These must match for thread signalling to apply. */

	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->swappable = 1;
	p->exit_signal = clone_flags & CSIGNAL;
	p->pdeath_signal = 0;

	/*
	 * "share" dynamic priority between parent and child, thus the
	 * total amount of dynamic priorities in the system doesn't change,
	 * more scheduling fairness. This is only important in the first
	 * timeslice, on the long run the scheduling behaviour is unchanged.
	 */
	p->counter = (current->counter + 1) >> 1;
	current->counter >>= 1;
	if (!current->counter)
		current->need_resched = 1;
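	/*
	 * Worked example: if the parent had counter == 11 left, the child
	 * starts with (11 + 1) >> 1 == 6 ticks and the parent keeps
	 * 11 >> 1 == 5, so together they still hold the 11 ticks the
	 * parent had before the fork.
	 */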

	/*
	 * Ok, add it to the run-queues and make it
	 * visible to the rest of the system.
	 *
	 * Let it rip!
	 */
	retval = p->pid;
	p->tgid = retval;
	INIT_LIST_HEAD(&p->thread_group);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	p->p_opptr = current->p_opptr;
	p->p_pptr = current->p_pptr;
	if (!(clone_flags & CLONE_PARENT)) {
		p->p_opptr = current;
		if (!(p->ptrace & PT_PTRACED))
			p->p_pptr = current;
	}

	if (clone_flags & CLONE_THREAD) {
		p->tgid = current->tgid;
		list_add(&p->thread_group, &current->thread_group);
	}

	SET_LINKS(p);
	hash_pid(p);
	nr_threads++;
	write_unlock_irq(&tasklist_lock);

	if (p->ptrace & PT_PTRACED)
		send_sig(SIGSTOP, p, 1);

	wake_up_process(p);		/* do this last */
	++total_forks;
	if (clone_flags & CLONE_VFORK)
		wait_for_completion(&vfork);

fork_out:
	return retval;

bad_fork_cleanup_namespace:
	exit_namespace(p);
bad_fork_cleanup_mm:
	exit_mm(p);
bad_fork_cleanup_sighand:
	exit_sighand(p);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup:
	put_exec_domain(p->exec_domain);
	if (p->binfmt && p->binfmt->module)
		__MOD_DEC_USE_COUNT(p->binfmt->module);
bad_fork_cleanup_count:
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task_struct(p);
	goto fork_out;
}

/* SLAB cache for signal_struct structures (tsk->sig) */
kmem_cache_t *sigact_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
kmem_cache_t *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
kmem_cache_t *fs_cachep;

/* SLAB cache for vm_area_struct structures */
kmem_cache_t *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
kmem_cache_t *mm_cachep;

void __init proc_caches_init(void)
{
	sigact_cachep = kmem_cache_create("signal_act",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!sigact_cachep)
		panic("Cannot create signal action SLAB cache");

	files_cachep = kmem_cache_create("files_cache",
			 sizeof(struct files_struct), 0,
			 SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!files_cachep)
		panic("Cannot create files SLAB cache");

	fs_cachep = kmem_cache_create("fs_cache",
			 sizeof(struct fs_struct), 0,
			 SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!fs_cachep)
		panic("Cannot create fs_struct SLAB cache");

	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if(!vm_area_cachep)
		panic("vma_init: Cannot alloc vm_area_struct SLAB cache");

	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if(!mm_cachep)
		panic("vma_init: Cannot alloc mm_struct SLAB cache");
}