X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;ds=sidebyside;f=kernel%2Fsignal.c;h=2ac3a668d9dd6adf8fb943ab8a0894f6b486e3fb;hb=0397bad5b413226b8f55f599b185ab506d13b078;hp=fb5da6d19f14eef721f28750307c9654f17a8bfa;hpb=c0341b0f47722fbe5ab45f436fc6ddc1c58c0a6f;p=powerpc.git diff --git a/kernel/signal.c b/kernel/signal.c index fb5da6d19f..2ac3a668d9 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -12,7 +12,6 @@ #include #include -#include #include #include #include @@ -23,6 +22,10 @@ #include #include #include +#include +#include +#include + #include #include #include @@ -33,127 +36,8 @@ * SLAB caches for signal bits. */ -static kmem_cache_t *sigqueue_cachep; - -/* - * In POSIX a signal is sent either to a specific thread (Linux task) - * or to the process as a whole (Linux thread group). How the signal - * is sent determines whether it's to one thread or the whole group, - * which determines which signal mask(s) are involved in blocking it - * from being delivered until later. When the signal is delivered, - * either it's caught or ignored by a user handler or it has a default - * effect that applies to the whole thread group (POSIX process). - * - * The possible effects an unblocked signal set to SIG_DFL can have are: - * ignore - Nothing Happens - * terminate - kill the process, i.e. all threads in the group, - * similar to exit_group. The group leader (only) reports - * WIFSIGNALED status to its parent. - * coredump - write a core dump file describing all threads using - * the same mm and then kill all those threads - * stop - stop all the threads in the group, i.e. TASK_STOPPED state - * - * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored. - * Other signals when not blocked and set to SIG_DFL behaves as follows. - * The job control signals also have other special effects. - * - * +--------------------+------------------+ - * | POSIX signal | default action | - * +--------------------+------------------+ - * | SIGHUP | terminate | - * | SIGINT | terminate | - * | SIGQUIT | coredump | - * | SIGILL | coredump | - * | SIGTRAP | coredump | - * | SIGABRT/SIGIOT | coredump | - * | SIGBUS | coredump | - * | SIGFPE | coredump | - * | SIGKILL | terminate(+) | - * | SIGUSR1 | terminate | - * | SIGSEGV | coredump | - * | SIGUSR2 | terminate | - * | SIGPIPE | terminate | - * | SIGALRM | terminate | - * | SIGTERM | terminate | - * | SIGCHLD | ignore | - * | SIGCONT | ignore(*) | - * | SIGSTOP | stop(*)(+) | - * | SIGTSTP | stop(*) | - * | SIGTTIN | stop(*) | - * | SIGTTOU | stop(*) | - * | SIGURG | ignore | - * | SIGXCPU | coredump | - * | SIGXFSZ | coredump | - * | SIGVTALRM | terminate | - * | SIGPROF | terminate | - * | SIGPOLL/SIGIO | terminate | - * | SIGSYS/SIGUNUSED | coredump | - * | SIGSTKFLT | terminate | - * | SIGWINCH | ignore | - * | SIGPWR | terminate | - * | SIGRTMIN-SIGRTMAX | terminate | - * +--------------------+------------------+ - * | non-POSIX signal | default action | - * +--------------------+------------------+ - * | SIGEMT | coredump | - * +--------------------+------------------+ - * - * (+) For SIGKILL and SIGSTOP the action is "always", not just "default". - * (*) Special job control effects: - * When SIGCONT is sent, it resumes the process (all threads in the group) - * from TASK_STOPPED state and also clears any pending/queued stop signals - * (any of those marked with "stop(*)"). This happens regardless of blocking, - * catching, or ignoring SIGCONT. 
When any stop signal is sent, it clears - * any pending/queued SIGCONT signals; this happens regardless of blocking, - * catching, or ignored the stop signal, though (except for SIGSTOP) the - * default action of stopping the process may happen later or never. - */ - -#ifdef SIGEMT -#define M_SIGEMT M(SIGEMT) -#else -#define M_SIGEMT 0 -#endif - -#if SIGRTMIN > BITS_PER_LONG -#define M(sig) (1ULL << ((sig)-1)) -#else -#define M(sig) (1UL << ((sig)-1)) -#endif -#define T(sig, mask) (M(sig) & (mask)) - -#define SIG_KERNEL_ONLY_MASK (\ - M(SIGKILL) | M(SIGSTOP) ) - -#define SIG_KERNEL_STOP_MASK (\ - M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) ) - -#define SIG_KERNEL_COREDUMP_MASK (\ - M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \ - M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \ - M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT ) - -#define SIG_KERNEL_IGNORE_MASK (\ - M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) ) - -#define sig_kernel_only(sig) \ - (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK)) -#define sig_kernel_coredump(sig) \ - (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK)) -#define sig_kernel_ignore(sig) \ - (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK)) -#define sig_kernel_stop(sig) \ - (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK)) - -#define sig_needs_tasklist(sig) ((sig) == SIGCONT) +static struct kmem_cache *sigqueue_cachep; -#define sig_user_defined(t, signr) \ - (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ - ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN)) - -#define sig_fatal(t, signr) \ - (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \ - (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL) static int sig_ignored(struct task_struct *t, int sig) { @@ -267,18 +151,25 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, int override_rlimit) { struct sigqueue *q = NULL; + struct user_struct *user; - atomic_inc(&t->user->sigpending); + /* + * In order to avoid problems with "switch_user()", we want to make + * sure that the compiler doesn't re-load "t->user" + */ + user = t->user; + barrier(); + atomic_inc(&user->sigpending); if (override_rlimit || - atomic_read(&t->user->sigpending) <= + atomic_read(&user->sigpending) <= t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) q = kmem_cache_alloc(sigqueue_cachep, flags); if (unlikely(q == NULL)) { - atomic_dec(&t->user->sigpending); + atomic_dec(&user->sigpending); } else { INIT_LIST_HEAD(&q->list); q->flags = 0; - q->user = get_uid(t->user); + q->user = get_uid(user); } return(q); } @@ -318,6 +209,16 @@ void flush_signals(struct task_struct *t) spin_unlock_irqrestore(&t->sighand->siglock, flags); } +void ignore_signals(struct task_struct *t) +{ + int i; + + for (i = 0; i < _NSIG; ++i) + t->sighand->action[i].sa.sa_handler = SIG_IGN; + + flush_signals(t); +} + /* * Flush all handlers for a task. */ @@ -445,26 +346,50 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) { int signr = __dequeue_signal(&tsk->pending, mask, info); - if (!signr) + if (!signr) { signr = __dequeue_signal(&tsk->signal->shared_pending, mask, info); + /* + * itimer signal ? + * + * itimers are process shared and we restart periodic + * itimers in the signal delivery path to prevent DoS + * attacks in the high resolution timer case. This is + * compliant with the old way of self restarting + * itimers, as the SIGALRM is a legacy signal and only + * queued once. 
Changing the restart behaviour to + * restart the timer in the signal dequeue path is + * reducing the timer noise on heavy loaded !highres + * systems too. + */ + if (unlikely(signr == SIGALRM)) { + struct hrtimer *tmr = &tsk->signal->real_timer; + + if (!hrtimer_is_queued(tmr) && + tsk->signal->it_real_incr.tv64 != 0) { + hrtimer_forward(tmr, tmr->base->get_time(), + tsk->signal->it_real_incr); + hrtimer_restart(tmr); + } + } + } recalc_sigpending_tsk(tsk); - if (signr && unlikely(sig_kernel_stop(signr))) { - /* - * Set a marker that we have dequeued a stop signal. Our - * caller might release the siglock and then the pending - * stop signal it is about to process is no longer in the - * pending bitmasks, but must still be cleared by a SIGCONT - * (and overruled by a SIGKILL). So those cases clear this - * shared flag after we've set it. Note that this flag may - * remain set after the signal we return is ignored or - * handled. That doesn't matter because its only purpose - * is to alert stop-signal processing code when another - * processor has come along and cleared the flag. - */ - if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) - tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; - } + if (signr && unlikely(sig_kernel_stop(signr))) { + /* + * Set a marker that we have dequeued a stop signal. Our + * caller might release the siglock and then the pending + * stop signal it is about to process is no longer in the + * pending bitmasks, but must still be cleared by a SIGCONT + * (and overruled by a SIGKILL). So those cases clear this + * shared flag after we've set it. Note that this flag may + * remain set after the signal we return is ignored or + * handled. That doesn't matter because its only purpose + * is to alert stop-signal processing code when another + * processor has come along and cleared the flag. + */ + if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) + tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; + } if ( signr && ((info->si_code & __SI_MASK) == __SI_TIMER) && info->si_sys_private){ @@ -575,7 +500,7 @@ static int check_kill_permission(int sig, struct siginfo *info, error = -EPERM; if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) && ((sig != SIGCONT) || - (current->signal->session != t->signal->session)) + (process_session(current) != process_session(t))) && (current->euid ^ t->suid) && (current->euid ^ t->uid) && (current->uid ^ t->suid) && (current->uid ^ t->uid) && !capable(CAP_KILL)) @@ -998,17 +923,6 @@ void zap_other_threads(struct task_struct *p) if (t->exit_state) continue; - /* - * We don't want to notify the parent, since we are - * killed as part of a thread group due to another - * thread doing an execve() or similar. So set the - * exit signal to -1 to allow immediate reaping of - * the process. But don't detach the thread group - * leader. 
- */ - if (t != p->group_leader) - t->exit_signal = -1; - /* SIGKILL will be handled before any pending SIGSTOP */ sigaddset(&t->pending.signal, SIGKILL); signal_wake_up(t, 1); @@ -1055,64 +969,68 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) } /* - * kill_pg_info() sends a signal to a process group: this is what the tty + * kill_pgrp_info() sends a signal to a process group: this is what the tty * control characters do (^C, ^Z etc) */ -int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp) +int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) { struct task_struct *p = NULL; int retval, success; - if (pgrp <= 0) - return -EINVAL; - success = 0; retval = -ESRCH; - do_each_task_pid(pgrp, PIDTYPE_PGID, p) { + do_each_pid_task(pgrp, PIDTYPE_PGID, p) { int err = group_send_sig_info(sig, info, p); success |= !err; retval = err; - } while_each_task_pid(pgrp, PIDTYPE_PGID, p); + } while_each_pid_task(pgrp, PIDTYPE_PGID, p); return success ? 0 : retval; } -int -kill_pg_info(int sig, struct siginfo *info, pid_t pgrp) +int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) { int retval; read_lock(&tasklist_lock); - retval = __kill_pg_info(sig, info, pgrp); + retval = __kill_pgrp_info(sig, info, pgrp); read_unlock(&tasklist_lock); return retval; } -int -kill_proc_info(int sig, struct siginfo *info, pid_t pid) +int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) { int error; - int acquired_tasklist_lock = 0; struct task_struct *p; rcu_read_lock(); - if (unlikely(sig_needs_tasklist(sig))) { + if (unlikely(sig_needs_tasklist(sig))) read_lock(&tasklist_lock); - acquired_tasklist_lock = 1; - } - p = find_task_by_pid(pid); + + p = pid_task(pid, PIDTYPE_PID); error = -ESRCH; if (p) error = group_send_sig_info(sig, info, p); - if (unlikely(acquired_tasklist_lock)) + + if (unlikely(sig_needs_tasklist(sig))) read_unlock(&tasklist_lock); rcu_read_unlock(); return error; } -/* like kill_proc_info(), but doesn't use uid/euid of "current" */ -int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid, +int +kill_proc_info(int sig, struct siginfo *info, pid_t pid) +{ + int error; + rcu_read_lock(); + error = kill_pid_info(sig, info, find_pid(pid)); + rcu_read_unlock(); + return error; +} + +/* like kill_pid_info(), but doesn't use uid/euid of "current" */ +int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, uid_t uid, uid_t euid, u32 secid) { int ret = -EINVAL; @@ -1122,7 +1040,7 @@ int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid, return ret; read_lock(&tasklist_lock); - p = find_task_by_pid(pid); + p = pid_task(pid, PIDTYPE_PID); if (!p) { ret = -ESRCH; goto out_unlock; @@ -1146,7 +1064,7 @@ out_unlock: read_unlock(&tasklist_lock); return ret; } -EXPORT_SYMBOL_GPL(kill_proc_info_as_uid); +EXPORT_SYMBOL_GPL(kill_pid_info_as_uid); /* * kill_something_info() interprets pid in interesting ways just like kill(2). @@ -1157,8 +1075,10 @@ EXPORT_SYMBOL_GPL(kill_proc_info_as_uid); static int kill_something_info(int sig, struct siginfo *info, int pid) { + int ret; + rcu_read_lock(); if (!pid) { - return kill_pg_info(sig, info, process_group(current)); + ret = kill_pgrp_info(sig, info, task_pgrp(current)); } else if (pid == -1) { int retval = 0, count = 0; struct task_struct * p; @@ -1173,12 +1093,14 @@ static int kill_something_info(int sig, struct siginfo *info, int pid) } } read_unlock(&tasklist_lock); - return count ? retval : -ESRCH; + ret = count ? 
retval : -ESRCH; } else if (pid < 0) { - return kill_pg_info(sig, info, -pid); + ret = kill_pgrp_info(sig, info, find_pid(-pid)); } else { - return kill_proc_info(sig, info, pid); + ret = kill_pid_info(sig, info, find_pid(pid)); } + rcu_read_unlock(); + return ret; } /* @@ -1264,11 +1186,17 @@ force_sigsegv(int sig, struct task_struct *p) return 0; } -int -kill_pg(pid_t pgrp, int sig, int priv) +int kill_pgrp(struct pid *pid, int sig, int priv) +{ + return kill_pgrp_info(sig, __si_special(priv), pid); +} +EXPORT_SYMBOL(kill_pgrp); + +int kill_pid(struct pid *pid, int sig, int priv) { - return kill_pg_info(sig, __si_special(priv), pgrp); + return kill_pid_info(sig, __si_special(priv), pid); } +EXPORT_SYMBOL(kill_pid); int kill_proc(pid_t pid, int sig, int priv) @@ -1658,7 +1586,9 @@ finish_stop(int stop_count) read_unlock(&tasklist_lock); } - schedule(); + do { + schedule(); + } while (try_to_freeze()); /* * Now we don't run again until continued. */ @@ -1833,8 +1763,12 @@ relock: if (sig_kernel_ignore(signr)) /* Default is nothing. */ continue; - /* Init gets no signals it doesn't want. */ - if (current == child_reaper) + /* + * Init of a pid space gets no signals it doesn't want from + * within that pid space. It can of course get signals from + * its parent pid space. + */ + if (current == child_reaper(current)) continue; if (sig_kernel_stop(signr)) { @@ -1853,7 +1787,7 @@ relock: /* signals can be posted during this window */ - if (is_orphaned_pgrp(process_group(current))) + if (is_current_pgrp_orphaned()) goto relock; spin_lock_irq(¤t->sighand->siglock); @@ -1903,7 +1837,6 @@ EXPORT_SYMBOL(recalc_sigpending); EXPORT_SYMBOL_GPL(dequeue_signal); EXPORT_SYMBOL(flush_signals); EXPORT_SYMBOL(force_sig); -EXPORT_SYMBOL(kill_pg); EXPORT_SYMBOL(kill_proc); EXPORT_SYMBOL(ptrace_notify); EXPORT_SYMBOL(send_sig); @@ -2230,7 +2163,7 @@ static int do_tkill(int tgid, int pid, int sig) * @pid: the PID of the thread * @sig: signal to be sent * - * This syscall also checks the tgid and returns -ESRCH even if the PID + * This syscall also checks the @tgid and returns -ESRCH even if the PID * exists but it's not belonging to the target process anymore. This * method solves the problem of threads exiting and PIDs getting reused. */ @@ -2582,9 +2515,5 @@ __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) void __init signals_init(void) { - sigqueue_cachep = - kmem_cache_create("sigqueue", - sizeof(struct sigqueue), - __alignof__(struct sigqueue), - SLAB_PANIC, NULL, NULL); + sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); }
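
Not part of the patch itself: as a rough usage sketch of the struct pid based helpers this change introduces (kill_pgrp() and kill_pid(), which supersede the pid_t based kill_pg()/kill_proc() interfaces, with kill_pg's export removed above), a hypothetical in-kernel caller might look like the following. The function name hup_own_pgrp() is invented for illustration, the include list is indicative only, and error handling is elided.

	/*
	 * Hypothetical caller of the API added by this patch: send SIGHUP to
	 * the caller's own process group via its struct pid, where older code
	 * would have used kill_pg(process_group(current), SIGHUP, 1).
	 */
	#include <linux/sched.h>
	#include <linux/pid.h>

	static int hup_own_pgrp(void)
	{
		int err;

		rcu_read_lock();
		/*
		 * task_pgrp() yields the struct pid of current's process group,
		 * the same lookup kill_something_info() performs above.
		 * priv = 1 sends the signal as if from the kernel, bypassing
		 * the usual permission checks.
		 */
		err = kill_pgrp(task_pgrp(current), SIGHUP, 1);
		rcu_read_unlock();

		return err;
	}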