#include <linux/ptrace.h>
#include <asm/param.h>
#include <asm/uaccess.h>
+#include <asm/unistd.h>
#include <asm/siginfo.h>
+extern void k_getrusage(struct task_struct *, int, struct rusage *);
+
/*
* SLAB caches for signal bits.
*/
static kmem_cache_t *sigqueue_cachep;
-atomic_t nr_queued_signals;
-int max_queued_signals = 1024;
-
/*
* In POSIX a signal is sent either to a specific thread (Linux task)
* or to the process as a whole (Linux thread group). How the signal
static int sig_ignored(struct task_struct *t, int sig)
{
- void * handler;
+ void __user * handler;
/*
* Tracers always want to know about signals..
return sig;
}
-struct sigqueue *__sigqueue_alloc(void)
+static struct sigqueue *__sigqueue_alloc(void)
{
- struct sigqueue *q = 0;
+ struct sigqueue *q = NULL;
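+
+	/*
+	 * Queued signals are now accounted per user: the allocation is
+	 * refused once the allocating user's pending count reaches
+	 * RLIMIT_SIGPENDING, replacing the old global
+	 * nr_queued_signals/max_queued_signals limit.
+	 */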
- if (atomic_read(&nr_queued_signals) < max_queued_signals)
+	if (atomic_read(&current->user->sigpending) <
+ current->rlim[RLIMIT_SIGPENDING].rlim_cur)
q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
if (q) {
- atomic_inc(&nr_queued_signals);
INIT_LIST_HEAD(&q->list);
q->flags = 0;
- q->lock = 0;
+ q->lock = NULL;
+ q->user = get_uid(current->user);
+ atomic_inc(&q->user->sigpending);
}
return(q);
}
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
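+	/* Undo the accounting done in __sigqueue_alloc(): drop the
+	   user's pending count and the uid reference it took. */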
+ atomic_dec(&q->user->sigpending);
+ free_uid(q->user);
kmem_cache_free(sigqueue_cachep, q);
- atomic_dec(&nr_queued_signals);
}
static void flush_sigqueue(struct sigpending *queue)
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
tsk->signal = NULL;
+ /*
+ * Accumulate here the counters for all threads but the
+ * group leader as they die, so they can be added into
+ * the process-wide totals when those are taken.
+ * The group leader stays around as a zombie as long
+ * as there are other threads. When it gets reaped,
+ * the exit.c code will add its counts into these totals.
+ * We won't ever get here for the group leader, since it
+ * will have been the last reference on the signal_struct.
+ */
+ sig->utime += tsk->utime;
+ sig->stime += tsk->stime;
+ sig->min_flt += tsk->min_flt;
+ sig->maj_flt += tsk->maj_flt;
+ sig->nvcsw += tsk->nvcsw;
+ sig->nivcsw += tsk->nivcsw;
spin_unlock(&sighand->siglock);
sig = NULL; /* Marker for below. */
}
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
- struct sigqueue *q, *first = 0;
+ struct sigqueue *q, *first = NULL;
int still_pending = 0;
if (unlikely(!sigismember(&list->signal, sig)))
struct task_struct *t)
{
int error = -EINVAL;
+ int user;
+
if (sig < 0 || sig > _NSIG)
return error;
+
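+	/*
+	 * Treat the signal as user-originated when no siginfo was
+	 * supplied, or when real siginfo (the magic values 1 and 2
+	 * mark kernel-internal sends) says SI_FROMUSER.
+	 */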
+ user = (!info || ((unsigned long)info != 1 &&
+ (unsigned long)info != 2 && SI_FROMUSER(info)));
+
error = -EPERM;
- if ((!info || ((unsigned long)info != 1 &&
- (unsigned long)info != 2 && SI_FROMUSER(info)))
- && ((sig != SIGCONT) ||
+ if (user && ((sig != SIGCONT) ||
(current->signal->session != t->signal->session))
&& (current->euid ^ t->suid) && (current->euid ^ t->uid)
&& (current->uid ^ t->suid) && (current->uid ^ t->uid)
&& !capable(CAP_KILL))
return error;
+
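+	/*
+	 * Linux-VServer isolation: user-originated signals must not
+	 * cross context boundaries; a task outside the sender's
+	 * context is treated as nonexistent.
+	 */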
+ error = -ESRCH;
+ if (user && !vx_check(vx_task_xid(t), VX_ADMIN|VX_IDENT))
+ return error;
+
return security_task_kill(t, info, sig);
}
/* forward decl */
static void do_notify_parent_cldstop(struct task_struct *tsk,
- struct task_struct *parent);
+ struct task_struct *parent,
+ int why);
/*
* Handle magic process-wide effects of stop/continue signals.
* the SIGCHLD was pending on entry to this kill.
*/
p->signal->group_stop_count = 0;
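+	/*
+	 * stop_state: 1 if the group has stopped since the last
+	 * SIGCONT, -1 if a SIGCONT arrived and has not yet been
+	 * reported to the parent.
+	 */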
+ p->signal->stop_state = 1;
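+	/*
+	 * Drop the siglock across the notification: the cldstop
+	 * helper acquires the parent's siglock, and we must not
+	 * hold both siglocks at once.
+	 */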
+ spin_unlock(&p->sighand->siglock);
if (p->ptrace & PT_PTRACED)
- do_notify_parent_cldstop(p, p->parent);
+ do_notify_parent_cldstop(p, p->parent,
+ CLD_STOPPED);
else
do_notify_parent_cldstop(
p->group_leader,
- p->group_leader->real_parent);
+ p->group_leader->real_parent,
+ CLD_STOPPED);
+ spin_lock(&p->sighand->siglock);
}
rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
t = p;
t = next_thread(t);
} while (t != p);
+
+ if (p->signal->stop_state > 0) {
+ /*
+ * We were in fact stopped, and are now continued.
+ * Notify the parent with CLD_CONTINUED.
+ */
+ p->signal->stop_state = -1;
+ p->signal->group_exit_code = 0;
+ spin_unlock(&p->sighand->siglock);
+ if (p->ptrace & PT_PTRACED)
+ do_notify_parent_cldstop(p, p->parent,
+ CLD_CONTINUED);
+ else
+ do_notify_parent_cldstop(
+ p->group_leader,
+ p->group_leader->real_parent,
+ CLD_CONTINUED);
+ spin_lock(&p->sighand->siglock);
+ }
}
}
-static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
+static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+ struct sigpending *signals)
{
struct sigqueue * q = NULL;
int ret = 0;
make sure at least one signal gets delivered and don't
pass on the info struct. */
- if (atomic_read(&nr_queued_signals) < max_queued_signals)
+ if (atomic_read(&t->user->sigpending) <
+ t->rlim[RLIMIT_SIGPENDING].rlim_cur)
q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
if (q) {
- atomic_inc(&nr_queued_signals);
q->flags = 0;
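+		/* Hold a uid reference and count this signal against
+		   the receiver's per-user pending total, exactly as
+		   __sigqueue_alloc() does for current. */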
+ q->user = get_uid(t->user);
+ atomic_inc(&q->user->sigpending);
list_add_tail(&q->list, &signals->list);
switch ((unsigned long) info) {
case 0:
if (LEGACY_QUEUE(&t->pending, sig))
goto out;
- ret = send_signal(sig, info, &t->pending);
+ ret = send_signal(sig, info, t, &t->pending);
if (!ret && !sigismember(&t->blocked, sig))
signal_wake_up(t, sig == SIGKILL);
out:
static void
-__group_complete_signal(int sig, struct task_struct *p, unsigned int mask)
+__group_complete_signal(int sig, struct task_struct *p)
{
+ unsigned int mask;
struct task_struct *t;
+ /*
+ * Don't bother zombies and stopped tasks (but
+ * SIGKILL will punch through stopped state)
+ */
+ mask = TASK_DEAD | TASK_ZOMBIE | TASK_TRACED;
+ if (sig != SIGKILL)
+ mask |= TASK_STOPPED;
+
/*
* Now find a thread we can wake up to take the signal off the queue.
*
static int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
- unsigned int mask;
int ret = 0;
#ifdef CONFIG_SMP
/* This is a non-RT signal and we already have one queued. */
return ret;
- /*
- * Don't bother zombies and stopped tasks (but
- * SIGKILL will punch through stopped state)
- */
- mask = TASK_DEAD | TASK_ZOMBIE;
- if (sig != SIGKILL)
- mask |= TASK_STOPPED;
-
/*
* Put this signal on the shared-pending queue, or fail with EAGAIN.
* We always use the shared queue for process-wide signals,
* to avoid several races.
*/
- ret = send_signal(sig, info, &p->signal->shared_pending);
+ ret = send_signal(sig, info, p, &p->signal->shared_pending);
if (unlikely(ret))
return ret;
- __group_complete_signal(sig, p, mask);
+ __group_complete_signal(sig, p);
return 0;
}
unsigned long flags;
int ret;
- if (!vx_check(vx_task_xid(p), VX_ADMIN|VX_WATCH|VX_IDENT))
- return -ESRCH;
-
ret = check_kill_permission(sig, info, p);
if (!ret && sig && p->sighand) {
spin_lock_irqsave(&p->sighand->siglock, flags);
int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
{
struct task_struct *p;
- struct list_head *l;
- struct pid *pid;
- int retval;
- int found;
+ int retval, success;
if (pgrp <= 0)
return -EINVAL;
- found = 0;
- retval = 0;
- for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
- int err;
-
- found = 1;
- err = group_send_sig_info(sig, info, p);
- if (!retval)
- retval = err;
- }
- return found ? retval : -ESRCH;
+ success = 0;
+ retval = -ESRCH;
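+	/* Succeed if the signal reached at least one member of the
+	   group; otherwise return the last error (-ESRCH when the
+	   group is empty). */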
+ do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
+ int err = group_send_sig_info(sig, info, p);
+ success |= !err;
+ retval = err;
+ } while_each_task_pid(pgrp, PIDTYPE_PGID, p);
+ return success ? 0 : retval;
}
int
kill_sl_info(int sig, struct siginfo *info, pid_t sid)
{
int err, retval = -EINVAL;
- struct pid *pid;
- struct list_head *l;
struct task_struct *p;
if (sid <= 0)
retval = -ESRCH;
read_lock(&tasklist_lock);
- for_each_task_pid(sid, PIDTYPE_SID, p, l, pid) {
+ do_each_task_pid(sid, PIDTYPE_SID, p) {
if (!p->signal->leader)
continue;
err = group_send_sig_info(sig, info, p);
if (retval)
retval = err;
- }
+ } while_each_task_pid(sid, PIDTYPE_SID, p);
read_unlock(&tasklist_lock);
out:
return retval;
int ret;
unsigned long flags;
+ /*
+ * Make sure legacy kernel users don't send in bad values
+ * (normal paths check this in check_kill_permission).
+ */
+ if (sig < 0 || sig > _NSIG)
+ return -EINVAL;
+
/*
* We need the tasklist lock even for the specific
* thread case (when we don't need to follow the group
force_sig_info(sig, (void*)1L, p);
}
+/*
+ * When things go south during signal handling, we
+ * will force a SIGSEGV. And if the signal that caused
+ * the problem was already a SIGSEGV, we'll want to
+ * make sure we don't even try to deliver the signal..
+ */
+int
+force_sigsegv(int sig, struct task_struct *p)
+{
+ if (sig == SIGSEGV) {
+ unsigned long flags;
+ spin_lock_irqsave(&p->sighand->siglock, flags);
+ p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
+ spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ }
+ force_sig(SIGSEGV, p);
+ return 0;
+}
+
int
kill_pg(pid_t pgrp, int sig, int priv)
{
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
unsigned long flags;
- unsigned int mask;
int ret = 0;
BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
q->info.si_overrun++;
goto out;
}
- /*
- * Don't bother zombies and stopped tasks (but
- * SIGKILL will punch through stopped state)
- */
- mask = TASK_DEAD | TASK_ZOMBIE;
- if (sig != SIGKILL)
- mask |= TASK_STOPPED;
/*
* Put this signal on the shared-pending queue.
list_add_tail(&q->list, &p->signal->shared_pending.list);
sigaddset(&p->signal->shared_pending.signal, sig);
- __group_complete_signal(sig, p, mask);
+ __group_complete_signal(sig, p);
out:
spin_unlock_irqrestore(&p->sighand->siglock, flags);
read_unlock(&tasklist_lock);
* Fortunately this is not necessary for thread groups:
*/
if (p->tgid == tsk->tgid) {
- wake_up_interruptible(&tsk->wait_chldexit);
+ wake_up_interruptible_sync(&tsk->wait_chldexit);
return;
}
do {
- wake_up_interruptible(&tsk->wait_chldexit);
+ wake_up_interruptible_sync(&tsk->wait_chldexit);
tsk = next_thread(tsk);
if (tsk->signal != parent->signal)
BUG();
}
/*
- * Let a parent know about a status change of a child.
+ * Let a parent know about the death of a child.
+ * For a stopped/continued status change, use do_notify_parent_cldstop instead.
*/
void do_notify_parent(struct task_struct *tsk, int sig)
{
struct siginfo info;
unsigned long flags;
- int why, status;
struct sighand_struct *psig;
if (sig == -1)
BUG();
- BUG_ON(tsk->group_leader != tsk && tsk->group_leader->state != TASK_ZOMBIE && !tsk->ptrace);
- BUG_ON(tsk->group_leader == tsk && !thread_group_empty(tsk) && !tsk->ptrace);
+ /* do_notify_parent_cldstop should have been called instead. */
+ BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+
+ BUG_ON(!tsk->ptrace &&
+ (tsk->group_leader != tsk || !thread_group_empty(tsk)));
info.si_signo = sig;
info.si_errno = 0;
info.si_uid = tsk->uid;
/* FIXME: find out whether or not this is supposed to be c*time. */
- info.si_utime = tsk->utime;
- info.si_stime = tsk->stime;
+ info.si_utime = tsk->utime + tsk->signal->utime;
+ info.si_stime = tsk->stime + tsk->signal->stime;
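+	/*
+	 * tsk->signal->utime/stime hold the times of threads that have
+	 * already exited (accumulated as each non-leader thread dies),
+	 * so the parent sees thread-group totals.
+	 */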
- status = tsk->exit_code & 0x7f;
- why = SI_KERNEL; /* shouldn't happen */
- switch (tsk->state) {
- case TASK_STOPPED:
- /* FIXME -- can we deduce CLD_TRAPPED or CLD_CONTINUED? */
- if (tsk->ptrace & PT_PTRACED)
- why = CLD_TRAPPED;
- else
- why = CLD_STOPPED;
- break;
-
- default:
- if (tsk->exit_code & 0x80)
- why = CLD_DUMPED;
- else if (tsk->exit_code & 0x7f)
- why = CLD_KILLED;
- else {
- why = CLD_EXITED;
- status = tsk->exit_code >> 8;
- }
- break;
+ info.si_status = tsk->exit_code & 0x7f;
+ if (tsk->exit_code & 0x80)
+ info.si_code = CLD_DUMPED;
+ else if (tsk->exit_code & 0x7f)
+ info.si_code = CLD_KILLED;
+ else {
+ info.si_code = CLD_EXITED;
+ info.si_status = tsk->exit_code >> 8;
}
- info.si_code = why;
- info.si_status = status;
psig = tsk->parent->sighand;
spin_lock_irqsave(&psig->siglock, flags);
- if (sig == SIGCHLD && tsk->state != TASK_STOPPED &&
+ if (sig == SIGCHLD &&
(psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
(psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
/*
spin_unlock_irqrestore(&psig->siglock, flags);
}
-
-/*
- * We need the tasklist lock because it's the only
- * thing that protects out "parent" pointer.
- *
- * exit.c calls "do_notify_parent()" directly, because
- * it already has the tasklist lock.
- */
-void
-notify_parent(struct task_struct *tsk, int sig)
-{
- if (sig != -1) {
- read_lock(&tasklist_lock);
- do_notify_parent(tsk, sig);
- read_unlock(&tasklist_lock);
- }
-}
-
static void
-do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent)
+do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent,
+ int why)
{
struct siginfo info;
unsigned long flags;
info.si_utime = tsk->utime;
info.si_stime = tsk->stime;
- info.si_status = tsk->exit_code & 0x7f;
- info.si_code = CLD_STOPPED;
+ info.si_code = why;
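+	/*
+	 * CLD_STOPPED reports the group-wide stop signal recorded in
+	 * group_exit_code; a per-thread exit_code is only meaningful
+	 * for ptrace traps.
+	 */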
+ switch (why) {
+ case CLD_CONTINUED:
+ info.si_status = SIGCONT;
+ break;
+ case CLD_STOPPED:
+ info.si_status = tsk->signal->group_exit_code & 0x7f;
+ break;
+ case CLD_TRAPPED:
+ info.si_status = tsk->exit_code & 0x7f;
+ break;
+ default:
+ BUG();
+ }
sighand = parent->sighand;
spin_lock_irqsave(&sighand->siglock, flags);
spin_unlock_irqrestore(&sighand->siglock, flags);
}
+/*
+ * This must be called with current->sighand->siglock held.
+ *
+ * This should be the path for all ptrace stops.
+ * We always set current->last_siginfo while stopped here.
+ * That makes it a way to test a stopped process for
+ * being ptrace-stopped vs being job-control-stopped.
+ */
+static void ptrace_stop(int exit_code, siginfo_t *info)
+{
+ BUG_ON(!(current->ptrace & PT_PTRACED));
+
+ /*
+ * If there is a group stop in progress,
+ * we must participate in the bookkeeping.
+ */
+ if (current->signal->group_stop_count > 0)
+ --current->signal->group_stop_count;
+
+ current->last_siginfo = info;
+ current->exit_code = exit_code;
+
+ /* Let the debugger run. */
+ set_current_state(TASK_TRACED);
+	spin_unlock_irq(&current->sighand->siglock);
+ read_lock(&tasklist_lock);
+ do_notify_parent_cldstop(current, current->parent, CLD_TRAPPED);
+ read_unlock(&tasklist_lock);
+ schedule();
+
+ /*
+ * We are back. Now reacquire the siglock before touching
+ * last_siginfo, so that we are sure to have synchronized with
+ * any signal-sending on another CPU that wants to examine it.
+ */
+	spin_lock_irq(&current->sighand->siglock);
+ current->last_siginfo = NULL;
+
+ /*
+ * Queued signals ignored us while we were stopped for tracing.
+ * So check for any that we should take before resuming user mode.
+ */
+ recalc_sigpending();
+}
+
+void ptrace_notify(int exit_code)
+{
+ siginfo_t info;
+
+ BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
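+
+	/*
+	 * Callers encode the ptrace event in the high byte:
+	 * exit_code is (event << 8) | SIGTRAP, which is what the
+	 * BUG_ON above enforces.
+	 */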
+
+ memset(&info, 0, sizeof info);
+ info.si_signo = SIGTRAP;
+ info.si_code = exit_code;
+ info.si_pid = current->pid;
+ info.si_uid = current->uid;
+
+ /* Let the debugger run. */
+	spin_lock_irq(&current->sighand->siglock);
+ ptrace_stop(exit_code, &info);
+	spin_unlock_irq(&current->sighand->siglock);
+}
#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER
*/
if (stop_count < 0 || (current->ptrace & PT_PTRACED)) {
read_lock(&tasklist_lock);
- do_notify_parent_cldstop(current, current->parent);
+ do_notify_parent_cldstop(current, current->parent,
+ CLD_STOPPED);
read_unlock(&tasklist_lock);
}
else if (stop_count == 0) {
read_lock(&tasklist_lock);
do_notify_parent_cldstop(current->group_leader,
- current->group_leader->real_parent);
+ current->group_leader->real_parent,
+ CLD_STOPPED);
read_unlock(&tasklist_lock);
}
stop_count = --sig->group_stop_count;
current->exit_code = signr;
set_current_state(TASK_STOPPED);
+ if (stop_count == 0)
+ sig->stop_state = 1;
spin_unlock_irq(&sighand->siglock);
}
else if (thread_group_empty(current)) {
/*
* Lock must be held through transition to stopped state.
*/
- current->exit_code = signr;
+ current->exit_code = current->signal->group_exit_code = signr;
set_current_state(TASK_STOPPED);
+ sig->stop_state = 1;
spin_unlock_irq(&sighand->siglock);
}
else {
current->exit_code = signr;
set_current_state(TASK_STOPPED);
+ if (stop_count == 0)
+ sig->stop_state = 1;
spin_unlock_irq(&sighand->siglock);
read_unlock(&tasklist_lock);
* without any associated signal being in our queue.
*/
stop_count = --current->signal->group_stop_count;
+ if (stop_count == 0)
+ current->signal->stop_state = 1;
current->exit_code = current->signal->group_exit_code;
set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
return 1;
}
-int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs, void *cookie)
+int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
+ struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
int signr = 0;
if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
ptrace_signal_deliver(regs, cookie);
- /*
- * If there is a group stop in progress,
- * we must participate in the bookkeeping.
- */
- if (current->signal->group_stop_count > 0)
- --current->signal->group_stop_count;
-
/* Let the debugger run. */
- current->exit_code = signr;
- current->last_siginfo = info;
- set_current_state(TASK_STOPPED);
-	spin_unlock_irq(&current->sighand->siglock);
- notify_parent(current, SIGCHLD);
- schedule();
-
- current->last_siginfo = NULL;
+ ptrace_stop(signr, info);
/* We're back. Did the debugger cancel the sig? */
-	spin_lock_irq(&current->sighand->siglock);
signr = current->exit_code;
if (signr == 0)
continue;
	ka = &current->sighand->action[signr-1];
if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
continue;
- if (ka->sa.sa_handler != SIG_DFL) /* Run the handler. */
+ if (ka->sa.sa_handler != SIG_DFL) {
+ /* Run the handler. */
+ *return_ka = *ka;
+
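+			/* The snapshot in *return_ka is what gets
+			   delivered, so a SA_ONESHOT handler can safely
+			   be reset to SIG_DFL here. */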
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
break; /* will return non-zero "signr" value */
+ }
/*
* Now we are doing the default action for this signal.
EXPORT_SYMBOL(kill_proc_info);
EXPORT_SYMBOL(kill_sl);
EXPORT_SYMBOL(kill_sl_info);
-EXPORT_SYMBOL(notify_parent);
+EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(send_group_sig_info);
}
/**
- * sys_tkill - send signal to one specific thread
+ * sys_tgkill - send signal to one specific thread
* @tgid: the thread group ID of the thread
* @pid: the PID of the thread
* @sig: signal to be sent
int error;
if (uoss) {
- oss.ss_sp = (void *) current->sas_ss_sp;
+ oss.ss_sp = (void __user *) current->sas_ss_sp;
oss.ss_size = current->sas_ss_size;
oss.ss_flags = sas_ss_flags(sp);
}
if (uss) {
- void *ss_sp;
+ void __user *ss_sp;
size_t ss_size;
int ss_flags;
return error;
}
+#ifdef __ARCH_WANT_SYS_SIGPENDING
+
asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
return do_sigpending(set, sizeof(*set));
}
-#if !defined(__alpha__)
-/* Alpha has its own versions with special arguments. */
+#endif
+
+#ifdef __ARCH_WANT_SYS_SIGPROCMASK
+/* Some platforms have their own version with special arguments;
+   others support only sys_rt_sigprocmask. */
asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
out:
return error;
}
+#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
-#ifndef __sparc__
+#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
const struct sigaction __user *act,
out:
return ret;
}
-#endif /* __sparc__ */
-#endif
+#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
+
+#ifdef __ARCH_WANT_SYS_SGETMASK
-#if !defined(__alpha__) && !defined(__ia64__) && \
- !defined(__arm__) && !defined(__s390__)
/*
* For backwards compatibility. Functionality superseded by sigprocmask.
*/
return old;
}
-#endif /* !defined(__alpha__) */
+#endif /* __ARCH_WANT_SYS_SGETMASK */
-#if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__) && \
- !defined(__arm__)
+#ifdef __ARCH_WANT_SYS_SIGNAL
/*
* For backwards compatibility. Functionality superseded by sigaction.
*/
return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
-#endif /* !alpha && !__ia64__ && !defined(__mips__) && !defined(__arm__) */
+#endif /* __ARCH_WANT_SYS_SIGNAL */
-#ifndef HAVE_ARCH_SYS_PAUSE
+#ifdef __ARCH_WANT_SYS_PAUSE
asmlinkage long
sys_pause(void)
return -ERESTARTNOHAND;
}
-#endif /* HAVE_ARCH_SYS_PAUSE */
+#endif
void __init signals_init(void)
{
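+	/* SLAB_PANIC makes kmem_cache_create() panic on failure,
+	   replacing the explicit check-and-panic() removed below. */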
kmem_cache_create("sigqueue",
sizeof(struct sigqueue),
__alignof__(struct sigqueue),
- 0, NULL, NULL);
- if (!sigqueue_cachep)
- panic("signals_init(): cannot create sigqueue SLAB cache");
+ SLAB_PANIC, NULL, NULL);
}