/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/sched.h>

#include <asm/uaccess.h>
20 * SLAB caches for signal bits.
26 #define SIG_SLAB_DEBUG (SLAB_DEBUG_FREE | SLAB_RED_ZONE /* | SLAB_POISON */)
28 #define SIG_SLAB_DEBUG 0
31 static kmem_cache_t *sigqueue_cachep;
33 atomic_t nr_queued_signals;
34 int max_queued_signals = 1024;
36 void __init signals_init(void)
39 kmem_cache_create("sigqueue",
40 sizeof(struct sigqueue),
41 __alignof__(struct sigqueue),
42 SIG_SLAB_DEBUG, NULL, NULL);
44 panic("signals_init(): cannot create sigqueue SLAB cache");
48 /* Given the mask, find the first available signal that should be serviced. */
51 next_signal(struct task_struct *tsk, sigset_t *mask)
53 unsigned long i, *s, *m, x;
56 s = tsk->pending.signal.sig;
58 switch (_NSIG_WORDS) {
60 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
61 if ((x = *s &~ *m) != 0) {
62 sig = ffz(~x) + i*_NSIG_BPW + 1;
67 case 2: if ((x = s[0] &~ m[0]) != 0)
69 else if ((x = s[1] &~ m[1]) != 0)
76 case 1: if ((x = *s &~ *m) != 0)
84 static void flush_sigqueue(struct sigpending *queue)
86 struct sigqueue *q, *n;
88 sigemptyset(&queue->signal);
91 queue->tail = &queue->head;
95 kmem_cache_free(sigqueue_cachep, q);
96 atomic_dec(&nr_queued_signals);
102 * Flush all pending signals for a task.
106 flush_signals(struct task_struct *t)
109 flush_sigqueue(&t->pending);
112 void exit_sighand(struct task_struct *tsk)
114 struct signal_struct * sig = tsk->sig;
116 spin_lock_irq(&tsk->sigmask_lock);
119 if (atomic_dec_and_test(&sig->count))
120 kmem_cache_free(sigact_cachep, sig);
123 flush_sigqueue(&tsk->pending);
124 spin_unlock_irq(&tsk->sigmask_lock);
128 * Flush all handlers for a task.
132 flush_signal_handlers(struct task_struct *t)
135 struct k_sigaction *ka = &t->sig->action[0];
136 for (i = _NSIG ; i != 0 ; i--) {
137 if (ka->sa.sa_handler != SIG_IGN)
138 ka->sa.sa_handler = SIG_DFL;
140 sigemptyset(&ka->sa.sa_mask);
146 * sig_exit - cause the current task to exit due to a signal.
150 sig_exit(int sig, int exit_code, struct siginfo *info)
152 struct task_struct *t;
154 sigaddset(¤t->pending.signal, sig);
155 recalc_sigpending(current);
156 current->flags |= PF_SIGNALED;
158 /* Propagate the signal to all the tasks in
161 if (info && (unsigned long)info != 1
162 && info->si_code != SI_TKILL) {
163 read_lock(&tasklist_lock);
165 force_sig_info(sig, info, t);
167 read_unlock(&tasklist_lock);
174 /* Notify the system that a driver wants to block all signals for this
175 * process, and wants to be notified if any signals at all were to be
176 * sent/acted upon. If the notifier routine returns non-zero, then the
177 * signal will be acted upon after all. If the notifier routine returns 0,
178 * then then signal will be blocked. Only one block per process is
179 * allowed. priv is a pointer to private data that the notifier routine
180 * can use to determine if the signal should be blocked or not. */
183 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
187 spin_lock_irqsave(¤t->sigmask_lock, flags);
188 current->notifier_mask = mask;
189 current->notifier_data = priv;
190 current->notifier = notifier;
191 spin_unlock_irqrestore(¤t->sigmask_lock, flags);
194 /* Notify the system that blocking has ended. */
197 unblock_all_signals(void)
201 spin_lock_irqsave(¤t->sigmask_lock, flags);
202 current->notifier = NULL;
203 current->notifier_data = NULL;
204 recalc_sigpending(current);
205 spin_unlock_irqrestore(¤t->sigmask_lock, flags);
208 static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
210 if (sigismember(&list->signal, sig)) {
211 /* Collect the siginfo appropriate to this signal. */
212 struct sigqueue *q, **pp;
214 while ((q = *pp) != NULL) {
215 if (q->info.si_signo == sig)
220 /* Ok, it wasn't in the queue. We must have
221 been out of queue space. So zero out the
223 sigdelset(&list->signal, sig);
224 info->si_signo = sig;
232 if ((*pp = q->next) == NULL)
235 /* Copy the sigqueue information and free the queue entry */
236 copy_siginfo(info, &q->info);
237 kmem_cache_free(sigqueue_cachep,q);
238 atomic_dec(&nr_queued_signals);
240 /* Non-RT signals can exist multiple times.. */
241 if (sig >= SIGRTMIN) {
242 while ((q = *pp) != NULL) {
243 if (q->info.si_signo == sig)
249 sigdelset(&list->signal, sig);
257 * Dequeue a signal and return the element to the caller, which is
258 * expected to free it.
260 * All callers must be holding current->sigmask_lock.
264 dequeue_signal(sigset_t *mask, siginfo_t *info)
269 printk("SIG dequeue (%s:%d): %d ", current->comm, current->pid,
270 signal_pending(current));
273 sig = next_signal(current, mask);
275 if (current->notifier) {
276 if (sigismember(current->notifier_mask, sig)) {
277 if (!(current->notifier)(current->notifier_data)) {
278 current->sigpending = 0;
284 if (!collect_signal(sig, ¤t->pending, info))
287 /* XXX: Once POSIX.1b timers are in, if si_code == SI_TIMER,
288 we need to xchg out the timer overrun values. */
290 recalc_sigpending(current);
293 printk(" %d -> %d\n", signal_pending(current), sig);
299 static int rm_from_queue(int sig, struct sigpending *s)
301 struct sigqueue *q, **pp;
303 if (!sigismember(&s->signal, sig))
306 sigdelset(&s->signal, sig);
310 while ((q = *pp) != NULL) {
311 if (q->info.si_signo == sig) {
312 if ((*pp = q->next) == NULL)
314 kmem_cache_free(sigqueue_cachep,q);
315 atomic_dec(&nr_queued_signals);
324 * Remove signal sig from t->pending.
325 * Returns 1 if sig was found.
327 * All callers must be holding t->sigmask_lock.
329 static int rm_sig_from_queue(int sig, struct task_struct *t)
331 return rm_from_queue(sig, &t->pending);
335 * Bad permissions for sending the signal
337 int bad_signal(int sig, struct siginfo *info, struct task_struct *t)
339 return (!info || ((unsigned long)info != 1 && SI_FROMUSER(info)))
340 && ((sig != SIGCONT) || (current->session != t->session))
341 && (current->euid ^ t->suid) && (current->euid ^ t->uid)
342 && (current->uid ^ t->suid) && (current->uid ^ t->uid)
343 && !capable(CAP_KILL);
348 * < 0 : global action (kill - spread to all non-blocked threads)
352 static int signal_type(int sig, struct signal_struct *signals)
354 unsigned long handler;
359 handler = (unsigned long) signals->action[sig-1].sa.sa_handler;
363 /* "Ignore" handler.. Illogical, but that has an implicit handler for SIGCHLD */
365 return sig == SIGCHLD;
367 /* Default handler. Normally lethal, but.. */
371 case SIGCONT: case SIGWINCH:
372 case SIGCHLD: case SIGURG:
375 /* Implicit behaviour */
376 case SIGTSTP: case SIGTTIN: case SIGTTOU:
379 /* Implicit actions (kill or do special stuff) */
387 * Determine whether a signal should be posted or not.
389 * Signals with SIG_IGN can be ignored, except for the
390 * special case of a SIGCHLD.
392 * Some signals with SIG_DFL default to a non-action.
394 static int ignored_signal(int sig, struct task_struct *t)
396 /* Don't ignore traced or blocked signals */
397 if ((t->ptrace & PT_PTRACED) || sigismember(&t->blocked, sig))
400 return signal_type(sig, t->sig) == 0;
404 * Handle TASK_STOPPED cases etc implicit behaviour
405 * of certain magical signals.
407 * SIGKILL gets spread out to every thread.
409 static void handle_stop_signal(int sig, struct task_struct *t)
412 case SIGKILL: case SIGCONT:
413 /* Wake up the process if stopped. */
414 if (t->state == TASK_STOPPED)
417 rm_sig_from_queue(SIGSTOP, t);
418 rm_sig_from_queue(SIGTSTP, t);
419 rm_sig_from_queue(SIGTTOU, t);
420 rm_sig_from_queue(SIGTTIN, t);
423 case SIGSTOP: case SIGTSTP:
424 case SIGTTIN: case SIGTTOU:
425 /* If we're stopping again, cancel SIGCONT */
426 rm_sig_from_queue(SIGCONT, t);
431 static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
433 struct sigqueue * q = NULL;
435 /* Real-time signals must be queued if sent by sigqueue, or
436 some other real-time mechanism. It is implementation
437 defined whether kill() does so. We attempt to do so, on
438 the principle of least surprise, but since kill is not
439 allowed to fail with EAGAIN when low on memory we just
440 make sure at least one signal gets delivered and don't
441 pass on the info struct. */
443 if (atomic_read(&nr_queued_signals) < max_queued_signals) {
444 q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
448 atomic_inc(&nr_queued_signals);
451 signals->tail = &q->next;
452 switch ((unsigned long) info) {
454 q->info.si_signo = sig;
455 q->info.si_errno = 0;
456 q->info.si_code = SI_USER;
457 q->info.si_pid = current->pid;
458 q->info.si_uid = current->uid;
461 q->info.si_signo = sig;
462 q->info.si_errno = 0;
463 q->info.si_code = SI_KERNEL;
468 copy_siginfo(&q->info, info);
471 } else if (sig >= SIGRTMIN && info && (unsigned long)info != 1
472 && info->si_code != SI_USER) {
474 * Queue overflow, abort. We may abort if the signal was rt
475 * and sent by user using something other than kill().
480 sigaddset(&signals->signal, sig);
485 * Tell a process that it has a new active signal..
487 * NOTE! we rely on the previous spin_lock to
488 * lock interrupts for us! We can only be called with
489 * "sigmask_lock" held, and the local interrupt must
490 * have been disabled when that got acquired!
492 * No need to set need_resched since signal event passing
493 * goes through ->blocked
495 static inline void signal_wake_up(struct task_struct *t)
501 * If the task is running on a different CPU
502 * force a reschedule on the other CPU to make
503 * it notice the new signal quickly.
505 * The code below is a tad loose and might occasionally
506 * kick the wrong CPU if we catch the process in the
507 * process of changing - but no harm is done by that
508 * other than doing an extra (lightweight) IPI interrupt.
510 spin_lock(&runqueue_lock);
511 if (task_has_cpu(t) && t->processor != smp_processor_id())
512 smp_send_reschedule(t->processor);
513 spin_unlock(&runqueue_lock);
514 #endif /* CONFIG_SMP */
516 if (t->state & TASK_INTERRUPTIBLE) {
522 static int deliver_signal(int sig, struct siginfo *info, struct task_struct *t)
524 int retval = send_signal(sig, info, &t->pending);
526 if (!retval && !sigismember(&t->blocked, sig))
533 send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
540 printk("SIG queue (%s:%d): %d ", t->comm, t->pid, sig);
544 if (sig < 0 || sig > _NSIG)
546 /* The somewhat baroque permissions check... */
548 if (bad_signal(sig, info, t))
551 /* The null signal is a permissions and process existence probe.
552 No signal is actually delivered. Same goes for zombies. */
557 spin_lock_irqsave(&t->sigmask_lock, flags);
558 handle_stop_signal(sig, t);
560 /* Optimize away the signal, if it's a signal that can be
561 handled immediately (ie non-blocked and untraced) and
562 that is ignored (either explicitly or by default). */
564 if (ignored_signal(sig, t))
567 /* Support queueing exactly one non-rt signal, so that we
568 can get more detailed information about the cause of
570 if (sig < SIGRTMIN && sigismember(&t->pending.signal, sig))
573 ret = deliver_signal(sig, info, t);
575 spin_unlock_irqrestore(&t->sigmask_lock, flags);
578 printk(" %d -> %d\n", signal_pending(t), ret);
585 * Force a signal that the process can't ignore: if necessary
586 * we unblock the signal and change any SIG_IGN to SIG_DFL.
590 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
592 unsigned long int flags;
594 spin_lock_irqsave(&t->sigmask_lock, flags);
595 if (t->sig == NULL) {
596 spin_unlock_irqrestore(&t->sigmask_lock, flags);
600 if (t->sig->action[sig-1].sa.sa_handler == SIG_IGN)
601 t->sig->action[sig-1].sa.sa_handler = SIG_DFL;
602 sigdelset(&t->blocked, sig);
603 recalc_sigpending(t);
604 spin_unlock_irqrestore(&t->sigmask_lock, flags);
606 return send_sig_info(sig, info, t);
610 * kill_pg_info() sends a signal to a process group: this is what the tty
611 * control characters do (^C, ^Z etc)
615 kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
617 int retval = -EINVAL;
619 struct task_struct *p;
622 read_lock(&tasklist_lock);
624 if (p->pgrp == pgrp && thread_group_leader(p)) {
625 int err = send_sig_info(sig, info, p);
630 read_unlock(&tasklist_lock);
636 * kill_sl_info() sends a signal to the session leader: this is used
637 * to send SIGHUP to the controlling process of a terminal when
638 * the connection is lost.
642 kill_sl_info(int sig, struct siginfo *info, pid_t sess)
644 int retval = -EINVAL;
646 struct task_struct *p;
649 read_lock(&tasklist_lock);
651 if (p->leader && p->session == sess) {
652 int err = send_sig_info(sig, info, p);
657 read_unlock(&tasklist_lock);
663 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
666 struct task_struct *p;
668 read_lock(&tasklist_lock);
669 p = find_task_by_pid(pid);
672 if (!thread_group_leader(p)) {
673 struct task_struct *tg;
674 tg = find_task_by_pid(p->tgid);
678 error = send_sig_info(sig, info, p);
680 read_unlock(&tasklist_lock);
686 * kill_something_info() interprets pid in interesting ways just like kill(2).
688 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
689 * is probably wrong. Should make it like BSD or SYSV.
692 static int kill_something_info(int sig, struct siginfo *info, int pid)
695 return kill_pg_info(sig, info, current->pgrp);
696 } else if (pid == -1) {
697 int retval = 0, count = 0;
698 struct task_struct * p;
700 read_lock(&tasklist_lock);
702 if (p->pid > 1 && p != current && thread_group_leader(p)) {
703 int err = send_sig_info(sig, info, p);
709 read_unlock(&tasklist_lock);
710 return count ? retval : -ESRCH;
711 } else if (pid < 0) {
712 return kill_pg_info(sig, info, -pid);
714 return kill_proc_info(sig, info, pid);
719 * These are for backward compatibility with the rest of the kernel source.
723 send_sig(int sig, struct task_struct *p, int priv)
725 return send_sig_info(sig, (void*)(long)(priv != 0), p);
729 force_sig(int sig, struct task_struct *p)
731 force_sig_info(sig, (void*)1L, p);
735 kill_pg(pid_t pgrp, int sig, int priv)
737 return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp);
741 kill_sl(pid_t sess, int sig, int priv)
743 return kill_sl_info(sig, (void *)(long)(priv != 0), sess);
747 kill_proc(pid_t pid, int sig, int priv)
749 return kill_proc_info(sig, (void *)(long)(priv != 0), pid);
753 * Joy. Or not. Pthread wants us to wake up every thread
754 * in our parent group.
756 static void wake_up_parent(struct task_struct *parent)
758 struct task_struct *tsk = parent;
761 wake_up_interruptible(&tsk->wait_chldexit);
762 tsk = next_thread(tsk);
763 } while (tsk != parent);
767 * Let a parent know about a status change of a child.
770 void do_notify_parent(struct task_struct *tsk, int sig)
777 info.si_pid = tsk->pid;
778 info.si_uid = tsk->uid;
780 /* FIXME: find out whether or not this is supposed to be c*time. */
781 info.si_utime = tsk->times.tms_utime;
782 info.si_stime = tsk->times.tms_stime;
784 status = tsk->exit_code & 0x7f;
785 why = SI_KERNEL; /* shouldn't happen */
786 switch (tsk->state) {
788 /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
789 if (tsk->ptrace & PT_PTRACED)
796 if (tsk->exit_code & 0x80)
798 else if (tsk->exit_code & 0x7f)
802 status = tsk->exit_code >> 8;
807 info.si_status = status;
809 send_sig_info(sig, &info, tsk->p_pptr);
810 wake_up_parent(tsk->p_pptr);
815 * We need the tasklist lock because it's the only
816 * thing that protects out "parent" pointer.
818 * exit.c calls "do_notify_parent()" directly, because
819 * it already has the tasklist lock.
822 notify_parent(struct task_struct *tsk, int sig)
824 read_lock(&tasklist_lock);
825 do_notify_parent(tsk, sig);
826 read_unlock(&tasklist_lock);
829 EXPORT_SYMBOL(dequeue_signal);
830 EXPORT_SYMBOL(flush_signals);
831 EXPORT_SYMBOL(force_sig);
832 EXPORT_SYMBOL(force_sig_info);
833 EXPORT_SYMBOL(kill_pg);
834 EXPORT_SYMBOL(kill_pg_info);
835 EXPORT_SYMBOL(kill_proc);
836 EXPORT_SYMBOL(kill_proc_info);
837 EXPORT_SYMBOL(kill_sl);
838 EXPORT_SYMBOL(kill_sl_info);
839 EXPORT_SYMBOL(notify_parent);
840 EXPORT_SYMBOL(recalc_sigpending);
841 EXPORT_SYMBOL(send_sig);
842 EXPORT_SYMBOL(send_sig_info);
843 EXPORT_SYMBOL(block_all_signals);
844 EXPORT_SYMBOL(unblock_all_signals);
848 * System call entry points.
852 * We don't need to get the kernel lock - this is all local to this
853 * particular thread.. (and that's good, because this is _heavily_
854 * used by various programs)
858 sys_rt_sigprocmask(int how, sigset_t *set, sigset_t *oset, size_t sigsetsize)
861 sigset_t old_set, new_set;
863 /* XXX: Don't preclude handling different sized sigset_t's. */
864 if (sigsetsize != sizeof(sigset_t))
869 if (copy_from_user(&new_set, set, sizeof(*set)))
871 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
873 spin_lock_irq(¤t->sigmask_lock);
874 old_set = current->blocked;
882 sigorsets(&new_set, &old_set, &new_set);
885 signandsets(&new_set, &old_set, &new_set);
891 current->blocked = new_set;
892 recalc_sigpending(current);
893 spin_unlock_irq(¤t->sigmask_lock);
899 spin_lock_irq(¤t->sigmask_lock);
900 old_set = current->blocked;
901 spin_unlock_irq(¤t->sigmask_lock);
905 if (copy_to_user(oset, &old_set, sizeof(*oset)))
913 long do_sigpending(void *set, unsigned long sigsetsize)
915 long error = -EINVAL;
918 if (sigsetsize > sizeof(sigset_t))
921 spin_lock_irq(¤t->sigmask_lock);
922 sigandsets(&pending, ¤t->blocked, ¤t->pending.signal);
923 spin_unlock_irq(¤t->sigmask_lock);
926 if (!copy_to_user(set, &pending, sigsetsize))
933 sys_rt_sigpending(sigset_t *set, size_t sigsetsize)
935 return do_sigpending(set, sigsetsize);
939 sys_rt_sigtimedwait(const sigset_t *uthese, siginfo_t *uinfo,
940 const struct timespec *uts, size_t sigsetsize)
948 /* XXX: Don't preclude handling different sized sigset_t's. */
949 if (sigsetsize != sizeof(sigset_t))
952 if (copy_from_user(&these, uthese, sizeof(these)))
956 * Invert the set of allowed signals to get those we
959 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
963 if (copy_from_user(&ts, uts, sizeof(ts)))
965 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
970 spin_lock_irq(¤t->sigmask_lock);
971 sig = dequeue_signal(&these, &info);
973 timeout = MAX_SCHEDULE_TIMEOUT;
975 timeout = (timespec_to_jiffies(&ts)
976 + (ts.tv_sec || ts.tv_nsec));
979 /* None ready -- temporarily unblock those we're
980 * interested while we are sleeping in so that we'll
981 * be awakened when they arrive. */
982 sigset_t oldblocked = current->blocked;
983 sigandsets(¤t->blocked, ¤t->blocked, &these);
984 recalc_sigpending(current);
985 spin_unlock_irq(¤t->sigmask_lock);
987 current->state = TASK_INTERRUPTIBLE;
988 timeout = schedule_timeout(timeout);
990 spin_lock_irq(¤t->sigmask_lock);
991 sig = dequeue_signal(&these, &info);
992 current->blocked = oldblocked;
993 recalc_sigpending(current);
996 spin_unlock_irq(¤t->sigmask_lock);
1001 if (copy_siginfo_to_user(uinfo, &info))
1014 sys_kill(int pid, int sig)
1016 struct siginfo info;
1018 info.si_signo = sig;
1020 info.si_code = SI_USER;
1021 info.si_pid = current->pid;
1022 info.si_uid = current->uid;
1024 return kill_something_info(sig, &info, pid);
1028 * Kill only one task, even if it's a CLONE_THREAD task.
1031 sys_tkill(int pid, int sig)
1033 struct siginfo info;
1035 struct task_struct *p;
1037 /* This is only valid for single tasks */
1041 info.si_signo = sig;
1043 info.si_code = SI_TKILL;
1044 info.si_pid = current->pid;
1045 info.si_uid = current->uid;
1047 read_lock(&tasklist_lock);
1048 p = find_task_by_pid(pid);
1051 error = send_sig_info(sig, &info, p);
1053 read_unlock(&tasklist_lock);
1058 sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo)
1062 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
1065 /* Not even root can pretend to send signals from the kernel.
1066 Nor can they impersonate a kill(), which adds source info. */
1067 if (info.si_code >= 0)
1069 info.si_signo = sig;
1071 /* POSIX.1b doesn't mention process groups. */
1072 return kill_proc_info(sig, &info, pid);
1076 do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
1078 struct k_sigaction *k;
1080 if (sig < 1 || sig > _NSIG ||
1081 (act && (sig == SIGKILL || sig == SIGSTOP)))
1084 k = ¤t->sig->action[sig-1];
1086 spin_lock(¤t->sig->siglock);
1093 sigdelsetmask(&k->sa.sa_mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
1097 * "Setting a signal action to SIG_IGN for a signal that is
1098 * pending shall cause the pending signal to be discarded,
1099 * whether or not it is blocked."
1101 * "Setting a signal action to SIG_DFL for a signal that is
1102 * pending and whose default action is to ignore the signal
1103 * (for example, SIGCHLD), shall cause the pending signal to
1104 * be discarded, whether or not it is blocked"
1106 * Note the silly behaviour of SIGCHLD: SIG_IGN means that the
1107 * signal isn't actually ignored, but does automatic child
1108 * reaping, while SIG_DFL is explicitly said by POSIX to force
1109 * the signal to be ignored.
1112 if (k->sa.sa_handler == SIG_IGN
1113 || (k->sa.sa_handler == SIG_DFL
1114 && (sig == SIGCONT ||
1117 sig == SIGWINCH))) {
1118 spin_lock_irq(¤t->sigmask_lock);
1119 if (rm_sig_from_queue(sig, current))
1120 recalc_sigpending(current);
1121 spin_unlock_irq(¤t->sigmask_lock);
1125 spin_unlock(¤t->sig->siglock);
1130 do_sigaltstack (const stack_t *uss, stack_t *uoss, unsigned long sp)
1136 oss.ss_sp = (void *) current->sas_ss_sp;
1137 oss.ss_size = current->sas_ss_size;
1138 oss.ss_flags = sas_ss_flags(sp);
1147 if (verify_area(VERIFY_READ, uss, sizeof(*uss))
1148 || __get_user(ss_sp, &uss->ss_sp)
1149 || __get_user(ss_flags, &uss->ss_flags)
1150 || __get_user(ss_size, &uss->ss_size))
1154 if (on_sig_stack (sp))
1160 * Note - this code used to test ss_flags incorrectly
1161 * old code may have been written using ss_flags==0
1162 * to mean ss_flags==SS_ONSTACK (as this was the only
1163 * way that worked) - this fix preserves that older
1166 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
1169 if (ss_flags == SS_DISABLE) {
1174 if (ss_size < MINSIGSTKSZ)
1178 current->sas_ss_sp = (unsigned long) ss_sp;
1179 current->sas_ss_size = ss_size;
1184 if (copy_to_user(uoss, &oss, sizeof(oss)))
1194 sys_sigpending(old_sigset_t *set)
1196 return do_sigpending(set, sizeof(*set));
1199 #if !defined(__alpha__)
1200 /* Alpha has its own versions with special arguments. */
1203 sys_sigprocmask(int how, old_sigset_t *set, old_sigset_t *oset)
1206 old_sigset_t old_set, new_set;
1210 if (copy_from_user(&new_set, set, sizeof(*set)))
1212 new_set &= ~(sigmask(SIGKILL)|sigmask(SIGSTOP));
1214 spin_lock_irq(¤t->sigmask_lock);
1215 old_set = current->blocked.sig[0];
1223 sigaddsetmask(¤t->blocked, new_set);
1226 sigdelsetmask(¤t->blocked, new_set);
1229 current->blocked.sig[0] = new_set;
1233 recalc_sigpending(current);
1234 spin_unlock_irq(¤t->sigmask_lock);
1240 old_set = current->blocked.sig[0];
1243 if (copy_to_user(oset, &old_set, sizeof(*oset)))
1253 sys_rt_sigaction(int sig, const struct sigaction *act, struct sigaction *oact,
1256 struct k_sigaction new_sa, old_sa;
1259 /* XXX: Don't preclude handling different sized sigset_t's. */
1260 if (sigsetsize != sizeof(sigset_t))
1264 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
1268 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
1271 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
1277 #endif /* __sparc__ */
1280 #if !defined(__alpha__) && !defined(__ia64__)
1282 * For backwards compatibility. Functionality superseded by sigprocmask.
1288 return current->blocked.sig[0];
1292 sys_ssetmask(int newmask)
1296 spin_lock_irq(¤t->sigmask_lock);
1297 old = current->blocked.sig[0];
1299 siginitset(¤t->blocked, newmask & ~(sigmask(SIGKILL)|
1301 recalc_sigpending(current);
1302 spin_unlock_irq(¤t->sigmask_lock);
1306 #endif /* !defined(__alpha__) */
1308 #if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__)
1310 * For backwards compatibility. Functionality superseded by sigaction.
1312 asmlinkage unsigned long
1313 sys_signal(int sig, __sighandler_t handler)
1315 struct k_sigaction new_sa, old_sa;
1318 new_sa.sa.sa_handler = handler;
1319 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
1321 ret = do_sigaction(sig, &new_sa, &old_sa);
1323 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
1325 #endif /* !alpha && !__ia64__ && !defined(__mips__) */