#include <linux/ptrace.h>
#include <asm/param.h>
#include <asm/uaccess.h>
+#include <asm/unistd.h>
#include <asm/siginfo.h>
/*
static kmem_cache_t *sigqueue_cachep;
-atomic_t nr_queued_signals;
-int max_queued_signals = 1024;
-
/*
* In POSIX a signal is sent either to a specific thread (Linux task)
* or to the process as a whole (Linux thread group). How the signal
static int sig_ignored(struct task_struct *t, int sig)
{
- void * handler;
+ void __user * handler;
/*
* Tracers always want to know about signals..
return sig;
}
-struct sigqueue *__sigqueue_alloc(void)
+static struct sigqueue *__sigqueue_alloc(void)
{
- struct sigqueue *q = 0;
+ struct sigqueue *q = NULL;
- if (atomic_read(&nr_queued_signals) < max_queued_signals)
+ if (atomic_read(&current->user->sigpending) <
+ current->rlim[RLIMIT_SIGPENDING].rlim_cur)
q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
if (q) {
- atomic_inc(&nr_queued_signals);
INIT_LIST_HEAD(&q->list);
q->flags = 0;
- q->lock = 0;
+ q->lock = NULL;
+#warning MEF PLANETLAB: q->user = get_uid(current->user); is something new in Fedora Core.
+ q->user = get_uid(current->user);
+ atomic_inc(&q->user->sigpending);
}
return(q);
}
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
+ atomic_dec(&q->user->sigpending);
+ free_uid(q->user);
kmem_cache_free(sigqueue_cachep, q);
- atomic_dec(&nr_queued_signals);
}
static void flush_sigqueue(struct sigpending *queue)
static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
- struct sigqueue *q, *first = 0;
+ struct sigqueue *q, *first = NULL;
int still_pending = 0;
if (unlikely(!sigismember(&list->signal, sig)))
}
}
-static int send_signal(int sig, struct siginfo *info, struct sigpending *signals)
+static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
+ struct sigpending *signals)
{
struct sigqueue * q = NULL;
int ret = 0;
make sure at least one signal gets delivered and don't
pass on the info struct. */
- if (atomic_read(&nr_queued_signals) < max_queued_signals)
+ if (atomic_read(&t->user->sigpending) <
+ t->rlim[RLIMIT_SIGPENDING].rlim_cur)
q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
if (q) {
- atomic_inc(&nr_queued_signals);
q->flags = 0;
+#warning MEF PLANETLAB: q->user = get_uid(t->user); is something new in Fedora Core.
+ q->user = get_uid(t->user);
+ atomic_inc(&q->user->sigpending);
list_add_tail(&q->list, &signals->list);
switch ((unsigned long) info) {
case 0:
if (LEGACY_QUEUE(&t->pending, sig))
goto out;
- ret = send_signal(sig, info, &t->pending);
+ ret = send_signal(sig, info, t, &t->pending);
if (!ret && !sigismember(&t->blocked, sig))
signal_wake_up(t, sig == SIGKILL);
out:
* We always use the shared queue for process-wide signals,
* to avoid several races.
*/
- ret = send_signal(sig, info, &p->signal->shared_pending);
+ ret = send_signal(sig, info, p, &p->signal->shared_pending);
if (unlikely(ret))
return ret;
unsigned long flags;
int ret;
+ if (!vx_check(vx_task_xid(p), VX_ADMIN|VX_WATCH|VX_IDENT))
+ return -ESRCH;
+
ret = check_kill_permission(sig, info, p);
if (!ret && sig && p->sighand) {
spin_lock_irqsave(&p->sighand->siglock, flags);
struct task_struct *p;
struct list_head *l;
struct pid *pid;
- int retval;
- int found;
+ int retval, success;
if (pgrp <= 0)
return -EINVAL;
- found = 0;
- retval = 0;
+ success = 0;
+ retval = -ESRCH;
for_each_task_pid(pgrp, PIDTYPE_PGID, p, l, pid) {
- int err;
-
- found = 1;
- err = group_send_sig_info(sig, info, p);
- if (!retval)
- retval = err;
+ int err = group_send_sig_info(sig, info, p);
+ success |= !err;
+ retval = err;
}
- return found ? retval : -ESRCH;
+ return success ? 0 : retval;
}
int
int ret;
unsigned long flags;
+ /*
+ * Make sure legacy kernel users don't send in bad values
+ * (normal paths check this in check_kill_permission).
+ */
+ if (sig < 0 || sig > _NSIG)
+ return -EINVAL;
+
/*
* We need the tasklist lock even for the specific
* thread case (when we don't need to follow the group
* Fortunately this is not necessary for thread groups:
*/
if (p->tgid == tsk->tgid) {
- wake_up_interruptible(&tsk->wait_chldexit);
+ wake_up_interruptible_sync(&tsk->wait_chldexit);
return;
}
do {
- wake_up_interruptible(&tsk->wait_chldexit);
+ wake_up_interruptible_sync(&tsk->wait_chldexit);
tsk = next_thread(tsk);
if (tsk->signal != parent->signal)
BUG();
spin_unlock_irqrestore(&sighand->siglock, flags);
}
+int print_fatal_signals = 0;
+
+static void print_fatal_signal(struct pt_regs *regs, int signr)
+{
+ int i;
+ unsigned char insn;
+ printk("%s/%d: potentially unexpected fatal signal %d.\n",
+ current->comm, current->pid, signr);
+
+#ifdef __i386__
+ printk("code at %08lx: ", regs->eip);
+ for (i = 0; i < 16; i++) {
+ __get_user(insn, (unsigned char *)(regs->eip + i));
+ printk("%02x ", insn);
+ }
+#endif
+ printk("\n");
+ show_regs(regs);
+}
+
+static int __init setup_print_fatal_signals(char *str)
+{
+ get_option (&str, &print_fatal_signals);
+
+ return 1;
+}
+
+__setup("print-fatal-signals=", setup_print_fatal_signals);
#ifndef HAVE_ARCH_GET_SIGNAL_TO_DELIVER
if (!signr)
break; /* will return 0 */
+ if ((signr == SIGSEGV) && print_fatal_signals) {
+ spin_unlock_irq(&current->sighand->siglock);
+ print_fatal_signal(regs, signr);
+ spin_lock_irq(&current->sighand->siglock);
+ }
if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
ptrace_signal_deliver(regs, cookie);
* Anything else is fatal, maybe with a core dump.
*/
current->flags |= PF_SIGNALED;
+ if (print_fatal_signals)
+ print_fatal_signal(regs, signr);
if (sig_kernel_coredump(signr) &&
do_coredump((long)signr, signr, regs)) {
/*
}
/**
- * sys_tkill - send signal to one specific thread
+ * sys_tgkill - send signal to one specific thread
* @tgid: the thread group ID of the thread
* @pid: the PID of the thread
* @sig: signal to be sent
int error;
if (uoss) {
- oss.ss_sp = (void *) current->sas_ss_sp;
+ oss.ss_sp = (void __user *) current->sas_ss_sp;
oss.ss_size = current->sas_ss_size;
oss.ss_flags = sas_ss_flags(sp);
}
if (uss) {
- void *ss_sp;
+ void __user *ss_sp;
size_t ss_size;
int ss_flags;
return error;
}
+#ifdef __ARCH_WANT_SYS_SIGPENDING
+
asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
return do_sigpending(set, sizeof(*set));
}
-#if !defined(__alpha__)
-/* Alpha has its own versions with special arguments. */
+#endif
+
+#ifdef __ARCH_WANT_SYS_SIGPROCMASK
+/* Some platforms have their own version with special arguments others
+ support only sys_rt_sigprocmask. */
asmlinkage long
sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
out:
return error;
}
+#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
-#ifndef __sparc__
+#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
const struct sigaction __user *act,
out:
return ret;
}
-#endif /* __sparc__ */
-#endif
+#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
+
+#ifdef __ARCH_WANT_SYS_SGETMASK
-#if !defined(__alpha__) && !defined(__ia64__) && \
- !defined(__arm__) && !defined(__s390__)
/*
* For backwards compatibility. Functionality superseded by sigprocmask.
*/
return old;
}
-#endif /* !defined(__alpha__) */
+#endif /* __ARCH_WANT_SYS_SGETMASK */
-#if !defined(__alpha__) && !defined(__ia64__) && !defined(__mips__) && \
- !defined(__arm__)
+#ifdef __ARCH_WANT_SYS_SIGNAL
/*
* For backwards compatibility. Functionality superseded by sigaction.
*/
return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
-#endif /* !alpha && !__ia64__ && !defined(__mips__) && !defined(__arm__) */
+#endif /* __ARCH_WANT_SYS_SIGNAL */
-#ifndef HAVE_ARCH_SYS_PAUSE
+#ifdef __ARCH_WANT_SYS_PAUSE
asmlinkage long
sys_pause(void)
return -ERESTARTNOHAND;
}
-#endif /* HAVE_ARCH_SYS_PAUSE */
+#endif
void __init signals_init(void)
{
kmem_cache_create("sigqueue",
sizeof(struct sigqueue),
__alignof__(struct sigqueue),
- 0, NULL, NULL);
- if (!sigqueue_cachep)
- panic("signals_init(): cannot create sigqueue SLAB cache");
+ SLAB_PANIC, NULL, NULL);
}