X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ipc%2Fsem.c;h=71f652c30a7f16f0589e0c51995169184163ff7a;hb=9464c7cf61b9433057924c36e6e02f303a00e768;hp=d226d3dd1762bfe0dbd9fb7696bbbef60f11b1d4;hpb=e28576f437e494f8636fbc811976f4f918270671;p=linux-2.6.git

diff --git a/ipc/sem.c b/ipc/sem.c
index d226d3dd1..71f652c30 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -56,11 +56,14 @@
  * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie
  *
  * SMP-threaded, sysctl's added
- * (c) 1999 Manfred Spraul
+ * (c) 1999 Manfred Spraul
  * Enforced range limit on SEM_UNDO
  * (c) 2001 Red Hat Inc
  * Lockless wakeup
  * (c) 2003 Manfred Spraul
+ *
+ * support for audit of ipc object properties and permission changes
+ * Dustin Kirkland
  */
 
 #include
@@ -71,6 +74,13 @@
 #include
 #include
 #include
+#include
+#include
+#include
+#include
+#include
+#include
+
 #include
 #include "util.h"
 
@@ -87,7 +97,7 @@ static struct ipc_ids sem_ids;
 static int newary (key_t, int, int);
 static void freeary (struct sem_array *sma, int id);
 #ifdef CONFIG_PROC_FS
-static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
+static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 #endif
 
 #define SEMMSL_FAST 256 /* 512 bytes on stack */
@@ -114,10 +124,10 @@ void __init sem_init (void)
 {
         used_sems = 0;
         ipc_init_ids(&sem_ids,sc_semmni);
-
-#ifdef CONFIG_PROC_FS
-        create_proc_read_entry("sysvipc/sem", 0, 0, sysvipc_sem_read_proc, NULL);
-#endif
+        ipc_init_proc_interface("sysvipc/sem",
+                                " key semid perms nsems uid gid cuid cgid otime ctime\n",
+                                &sem_ids,
+                                sysvipc_sem_proc_show);
 }
 
 /*
@@ -135,7 +145,7 @@ void __init sem_init (void)
  *   * if it's IN_WAKEUP, then it must wait until the value changes
  *   * if it's not -EINTR, then the operation was completed by
  *     update_queue. semtimedop can return queue.status without
- *     performing any operation on the semaphore array.
+ *     performing any operation on the sem array.
  *   * otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
@@ -175,23 +185,24 @@ static int newary (key_t key, int nsems, int semflg)
         sma->sem_perm.mode = (semflg & S_IRWXUGO);
         sma->sem_perm.key = key;
-        sma->sem_perm.xid = current->xid;
+        sma->sem_perm.xid = vx_current_xid();
 
         sma->sem_perm.security = NULL;
         retval = security_sem_alloc(sma);
         if (retval) {
-                ipc_rcu_free(sma, size);
+                ipc_rcu_putref(sma);
                 return retval;
         }
 
         id = ipc_addid(&sem_ids, &sma->sem_perm, sc_semmni);
         if(id == -1) {
                 security_sem_free(sma);
-                ipc_rcu_free(sma, size);
+                ipc_rcu_putref(sma);
                 return -ENOSPC;
         }
         used_sems += nsems;
 
+        sma->sem_id = sem_buildid(id, sma->sem_perm.seq);
         sma->sem_base = (struct sem *) &sma[1];
         /* sma->sem_pending = NULL; */
         sma->sem_pending_last = &sma->sem_pending;
@@ -200,7 +211,7 @@ static int newary (key_t key, int nsems, int semflg)
         sma->sem_ctime = get_seconds();
         sem_unlock(sma);
 
-        return sem_buildid(id, sma->sem_perm.seq);
+        return sma->sem_id;
 }
 
 asmlinkage long sys_semget (key_t key, int nsems, int semflg)
@@ -210,7 +221,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
         if (nsems < 0 || nsems > sc_semmsl)
                 return -EINVAL;
 
-        down(&sem_ids.sem);
+        mutex_lock(&sem_ids.mutex);
 
         if (key == IPC_PRIVATE) {
                 err = newary(key, nsems, semflg);
@@ -223,8 +234,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
                         err = -EEXIST;
         } else {
                 sma = sem_lock(id);
-                if(sma==NULL)
-                        BUG();
+                BUG_ON(sma==NULL);
                 if (nsems > sma->sem_nsems)
                         err = -EINVAL;
                 else if (ipcperms(&sma->sem_perm, semflg))
@@ -238,29 +248,10 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
                 sem_unlock(sma);
         }
 
-        up(&sem_ids.sem);
+        mutex_unlock(&sem_ids.mutex);
         return err;
 }
 
-/* doesn't acquire the sem_lock on error! */
-static int sem_revalidate(int semid, struct sem_array* sma, int nsems, short flg)
-{
-        struct sem_array* smanew;
-
-        smanew = sem_lock(semid);
-        if(smanew==NULL)
-                return -EIDRM;
-        if(smanew != sma || sem_checkid(sma,semid) || sma->sem_nsems != nsems) {
-                sem_unlock(smanew);
-                return -EIDRM;
-        }
-
-        if (flg && ipcperms(&sma->sem_perm, flg)) {
-                sem_unlock(smanew);
-                return -EACCES;
-        }
-        return 0;
-}
 /* Manage the doubly linked list sma->sem_pending as a FIFO:
  * insert new queue elements at the tail sma->sem_pending_last.
  */
@@ -377,12 +368,27 @@ static void update_queue (struct sem_array * sma)
                 if (error <= 0) {
                         struct sem_queue *n;
                         remove_from_queue(sma,q);
-                        n = q->next;
                         q->status = IN_WAKEUP;
+                        /*
+                         * Continue scanning. The next operation
+                         * that must be checked depends on the type of the
+                         * completed operation:
+                         * - if the operation modified the array, then
+                         *   restart from the head of the queue and
+                         *   check for threads that might be waiting
+                         *   for semaphore values to become 0.
+                         * - if the operation didn't modify the array,
+                         *   then just continue.
+                         */
+                        if (q->alter)
+                                n = sma->sem_pending;
+                        else
+                                n = q->next;
                         wake_up_process(q->sleeper);
                         /* hands-off: q will disappear immediately after
                          * writing q->status.
                          */
+                        smp_wmb();
                         q->status = error;
                         q = n;
                 } else {
@@ -437,8 +443,8 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
         return semzcnt;
 }
 
-/* Free a semaphore set. freeary() is called with sem_ids.sem down and
- * the spinlock for this semaphore set hold. sem_ids.sem remains locked
+/* Free a semaphore set. freeary() is called with sem_ids.mutex locked and
+ * the spinlock for this semaphore set hold. sem_ids.mutex remains locked
  * on exit.
  */
 static void freeary (struct sem_array *sma, int id)
@@ -463,6 +469,7 @@ static void freeary (struct sem_array *sma, int id)
                 n = q->next;
                 q->status = IN_WAKEUP;
                 wake_up_process(q->sleeper); /* doesn't sleep */
+                smp_wmb();
                 q->status = -EIDRM; /* hands-off q */
                 q = n;
         }
@@ -474,7 +481,7 @@ static void freeary (struct sem_array *sma, int id)
         used_sems -= sma->sem_nsems;
         size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem);
         security_sem_free(sma);
-        ipc_rcu_free(sma, size);
+        ipc_rcu_putref(sma);
 }
 
 static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -524,7 +531,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
                 seminfo.semmnu = SEMMNU;
                 seminfo.semmap = SEMMAP;
                 seminfo.semume = SEMUME;
-                down(&sem_ids.sem);
+                mutex_lock(&sem_ids.mutex);
                 if (cmd == SEM_INFO) {
                         seminfo.semusz = sem_ids.in_use;
                         seminfo.semaem = used_sems;
@@ -533,7 +540,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
                         seminfo.semaem = SEMAEM;
                 }
                 max_id = sem_ids.max_id;
-                up(&sem_ids.sem);
+                mutex_unlock(&sem_ids.mutex);
                 if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
                         return -EFAULT;
                 return (max_id < 0) ? 0: max_id;
@@ -543,7 +550,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
                 struct semid64_ds tbuf;
                 int id;
 
-                if(semid >= sem_ids.size)
+                if(semid >= sem_ids.entries->size)
                         return -EINVAL;
 
                 memset(&tbuf,0,sizeof(tbuf));
@@ -615,13 +622,24 @@ static int semctl_main(int semid, int semnum, int cmd, int version, union semun
                 int i;
 
                 if(nsems > SEMMSL_FAST) {
+                        ipc_rcu_getref(sma);
                         sem_unlock(sma);
+
                         sem_io = ipc_alloc(sizeof(ushort)*nsems);
-                        if(sem_io == NULL)
+                        if(sem_io == NULL) {
+                                ipc_lock_by_ptr(&sma->sem_perm);
+                                ipc_rcu_putref(sma);
+                                sem_unlock(sma);
                                 return -ENOMEM;
-                        err = sem_revalidate(semid, sma, nsems, S_IRUGO);
-                        if(err)
+                        }
+
+                        ipc_lock_by_ptr(&sma->sem_perm);
+                        ipc_rcu_putref(sma);
+                        if (sma->sem_perm.deleted) {
+                                sem_unlock(sma);
+                                err = -EIDRM;
                                 goto out_free;
+                        }
                 }
 
                 for (i = 0; i < sma->sem_nsems; i++)
@@ -637,28 +655,43 @@ static int semctl_main(int semid, int semnum, int cmd, int version, union semun
                 int i;
                 struct sem_undo *un;
 
+                ipc_rcu_getref(sma);
                 sem_unlock(sma);
 
                 if(nsems > SEMMSL_FAST) {
                         sem_io = ipc_alloc(sizeof(ushort)*nsems);
-                        if(sem_io == NULL)
+                        if(sem_io == NULL) {
+                                ipc_lock_by_ptr(&sma->sem_perm);
+                                ipc_rcu_putref(sma);
+                                sem_unlock(sma);
                                 return -ENOMEM;
+                        }
                 }
 
                 if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
+                        ipc_lock_by_ptr(&sma->sem_perm);
+                        ipc_rcu_putref(sma);
+                        sem_unlock(sma);
                         err = -EFAULT;
                         goto out_free;
                 }
 
                 for (i = 0; i < nsems; i++) {
                         if (sem_io[i] > SEMVMX) {
+                                ipc_lock_by_ptr(&sma->sem_perm);
+                                ipc_rcu_putref(sma);
+                                sem_unlock(sma);
                                 err = -ERANGE;
                                 goto out_free;
                         }
                 }
-                err = sem_revalidate(semid, sma, nsems, S_IWUGO);
-                if(err)
+                ipc_lock_by_ptr(&sma->sem_perm);
+                ipc_rcu_putref(sma);
+                if (sma->sem_perm.deleted) {
+                        sem_unlock(sma);
+                        err = -EIDRM;
                         goto out_free;
+                }
 
                 for (i = 0; i < nsems; i++)
                         sma->sem_base[i].semval = sem_io[i];
@@ -792,7 +825,11 @@ static int semctl_down(int semid, int semnum, int cmd, int version, union semun
                 goto out_unlock;
         }
         ipcp = &sma->sem_perm;
-
+
+        err = audit_ipc_obj(ipcp);
+        if (err)
+                goto out_unlock;
+
         if (current->euid != ipcp->cuid &&
             current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
                 err=-EPERM;
@@ -809,6 +846,9 @@ static int semctl_down(int semid, int semnum, int cmd, int version, union semun
                 err = 0;
                 break;
         case IPC_SET:
+                err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode, ipcp);
+                if (err)
+                        goto out_unlock;
                 ipcp->uid = setbuf.uid;
                 ipcp->gid = setbuf.gid;
                 ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
@@ -857,9 +897,9 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
                 return err;
         case IPC_RMID:
         case IPC_SET:
-                down(&sem_ids.sem);
+                mutex_lock(&sem_ids.mutex);
                 err = semctl_down(semid,semnum,cmd,version,arg);
-                up(&sem_ids.sem);
+                mutex_unlock(&sem_ids.mutex);
                 return err;
         default:
                 return -EINVAL;
@@ -871,7 +911,7 @@ static inline void lock_semundo(void)
         struct sem_undo_list *undo_list;
 
         undo_list = current->sysvsem.undo_list;
-        if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
+        if (undo_list)
                 spin_lock(&undo_list->lock);
 }
 
@@ -891,7 +931,7 @@ static inline void unlock_semundo(void)
         struct sem_undo_list *undo_list;
 
         undo_list = current->sysvsem.undo_list;
-        if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
+        if (undo_list)
                 spin_unlock(&undo_list->lock);
 }
 
@@ -919,9 +959,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
                 if (undo_list == NULL)
                         return -ENOMEM;
                 memset(undo_list, 0, size);
-                /* don't initialize unodhd->lock here. It's done
-                 * in copy_semundo() instead.
-                 */
+                spin_lock_init(&undo_list->lock);
                 atomic_set(&undo_list->refcnt, 1);
                 current->sysvsem.undo_list = undo_list;
         }
@@ -978,11 +1016,16 @@ static struct sem_undo *find_undo(int semid)
                 goto out;
         }
         nsems = sma->sem_nsems;
+        ipc_rcu_getref(sma);
         sem_unlock(sma);
 
         new = (struct sem_undo *) kmalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
-        if (!new)
+        if (!new) {
+                ipc_lock_by_ptr(&sma->sem_perm);
+                ipc_rcu_putref(sma);
+                sem_unlock(sma);
                 return ERR_PTR(-ENOMEM);
+        }
         memset(new, 0, sizeof(struct sem_undo) + sizeof(short)*nsems);
         new->semadj = (short *) &new[1];
         new->semid = semid;
@@ -992,13 +1035,18 @@ static struct sem_undo *find_undo(int semid)
         if (un) {
                 unlock_semundo();
                 kfree(new);
+                ipc_lock_by_ptr(&sma->sem_perm);
+                ipc_rcu_putref(sma);
+                sem_unlock(sma);
                 goto out;
         }
-        error = sem_revalidate(semid, sma, nsems, 0);
-        if (error) {
+        ipc_lock_by_ptr(&sma->sem_perm);
+        ipc_rcu_putref(sma);
+        if (sma->sem_perm.deleted) {
+                sem_unlock(sma);
                 unlock_semundo();
                 kfree(new);
-                un = ERR_PTR(error);
+                un = ERR_PTR(-EIDRM);
                 goto out;
         }
         new->proc_next = ulp->proc_list;
@@ -1020,7 +1068,7 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
         struct sembuf fast_sops[SEMOPM_FAST];
         struct sembuf* sops = fast_sops, *sop;
         struct sem_undo *un;
-        int undos = 0, decrease = 0, alter = 0, max;
+        int undos = 0, alter = 0, max;
         struct sem_queue queue;
         unsigned long jiffies_left = 0;
 
@@ -1055,13 +1103,10 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
                 if (sop->sem_num >= max)
                         max = sop->sem_num;
                 if (sop->sem_flg & SEM_UNDO)
-                        undos++;
-                if (sop->sem_op < 0)
-                        decrease = 1;
-                if (sop->sem_op > 0)
+                        undos = 1;
+                if (sop->sem_op != 0)
                         alter = 1;
         }
-        alter |= decrease;
 
retry_undos:
         if (undos) {
@@ -1102,8 +1147,11 @@ retry_undos:
                 goto out_unlock_free;
 
         error = try_atomic_semop (sma, sops, nsops, un, current->tgid);
-        if (error <= 0)
-                goto update;
+        if (error <= 0) {
+                if (alter && error == 0)
+                        update_queue (sma);
+                goto out_unlock_free;
+        }
 
         /* We need to sleep on this operation, so we put the current
          * task into the pending queue and go to sleep.
@@ -1115,6 +1163,7 @@ retry_undos:
         queue.undo = un;
         queue.pid = current->tgid;
         queue.id = semid;
+        queue.alter = alter;
         if (alter)
                 append_to_queue(sma ,&queue);
         else
@@ -1144,8 +1193,7 @@ retry_undos:
         sma = sem_lock(semid);
         if(sma==NULL) {
-                if(queue.prev != NULL)
-                        BUG();
+                BUG_ON(queue.prev != NULL);
                 error = -EIDRM;
                 goto out_free;
         }
@@ -1166,9 +1214,6 @@ retry_undos:
                 remove_from_queue(sma,&queue);
         goto out_unlock_free;
 
-update:
-        if (alter)
-                update_queue (sma);
out_unlock_free:
         sem_unlock(sma);
out_free:
@@ -1199,8 +1244,6 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
                 error = get_undo_list(&undo_list);
                 if (error)
                         return error;
-                if (atomic_read(&undo_list->refcnt) == 1)
-                        spin_lock_init(&undo_list->lock);
                 atomic_inc(&undo_list->refcnt);
                 tsk->sysvsem.undo_list = undo_list;
         } else
@@ -1267,12 +1310,27 @@ found:
                 /* perform adjustments registered in u */
                 nsems = sma->sem_nsems;
                 for (i = 0; i < nsems; i++) {
-                        struct sem * sem = &sma->sem_base[i];
+                        struct sem * semaphore = &sma->sem_base[i];
                         if (u->semadj[i]) {
-                                sem->semval += u->semadj[i];
-                                if (sem->semval < 0)
-                                        sem->semval = 0; /* shouldn't happen */
-                                sem->sempid = current->tgid;
+                                semaphore->semval += u->semadj[i];
+                                /*
+                                 * Range checks of the new semaphore value,
+                                 * not defined by sus:
+                                 * - Some unices ignore the undo entirely
+                                 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
+                                 * - some cap the value (e.g. FreeBSD caps
+                                 *   at 0, but doesn't enforce SEMVMX)
+                                 *
+                                 * Linux caps the semaphore value, both at 0
+                                 * and at SEMVMX.
+                                 *
+                                 *      Manfred
+                                 */
+                                if (semaphore->semval < 0)
+                                        semaphore->semval = 0;
+                                if (semaphore->semval > SEMVMX)
+                                        semaphore->semval = SEMVMX;
+                                semaphore->sempid = current->tgid;
                         }
                 }
                 sma->sem_otime = get_seconds();
@@ -1285,54 +1343,24 @@ next_entry:
 }
 
 #ifdef CONFIG_PROC_FS
-static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
+static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
 {
-        off_t pos = 0;
-        off_t begin = 0;
-        int i, len = 0;
+        struct sem_array *sma = it;
 
-        len += sprintf(buffer, " key semid perms nsems uid gid cuid cgid otime ctime\n");
-        down(&sem_ids.sem);
-
-        for(i = 0; i <= sem_ids.max_id; i++) {
-                struct sem_array *sma;
-                sma = sem_lock(i);
-                if (sma) {
-                        if (!vx_check(sma->sem_perm.xid, VX_IDENT)) {
-                                sem_unlock(sma);
-                                continue;
-                        }
-                        len += sprintf(buffer + len, "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
-                                sma->sem_perm.key,
-                                sem_buildid(i,sma->sem_perm.seq),
-                                sma->sem_perm.mode,
-                                sma->sem_nsems,
-                                sma->sem_perm.uid,
-                                sma->sem_perm.gid,
-                                sma->sem_perm.cuid,
-                                sma->sem_perm.cgid,
-                                sma->sem_otime,
-                                sma->sem_ctime);
-                        sem_unlock(sma);
+        if (!vx_check(sma->sem_perm.xid, VX_IDENT))
+                return 0;
 
-                        pos += len;
-                        if(pos < offset) {
-                                len = 0;
-                                begin = pos;
-                        }
-                        if(pos > offset + length)
-                                goto done;
-                }
-        }
-        *eof = 1;
-done:
-        up(&sem_ids.sem);
-        *start = buffer + (offset - begin);
-        len -= (offset - begin);
-        if(len > length)
-                len = length;
-        if(len < 0)
-                len = 0;
-        return len;
+        return seq_printf(s,
+                          "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
+                          sma->sem_perm.key,
+                          sma->sem_id,
+                          sma->sem_perm.mode,
+                          sma->sem_nsems,
+                          sma->sem_perm.uid,
+                          sma->sem_perm.gid,
+                          sma->sem_perm.cuid,
+                          sma->sem_perm.cgid,
+                          sma->sem_otime,
+                          sma->sem_ctime);
 }
 #endif
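
For reference, below is a minimal userspace sketch (not part of the patch) that drives the kernel paths changed above: semget() lands in newary(), semctl(SETALL) in semctl_main(), semtimedop() with SEM_UNDO in sys_semtimedop()/find_undo(), and semctl(IPC_RMID) in semctl_down()/freeary(). It uses only the standard SysV semaphore API; identifiers such as init_vals and down0 are illustrative. Build with: gcc -Wall -o semdemo semdemo.c

#define _GNU_SOURCE             /* for semtimedop() */
#include <stdio.h>
#include <time.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* Most libcs require the caller to define union semun itself (see semctl(2)). */
union semun {
        int val;
        struct semid_ds *buf;
        unsigned short *array;
};

int main(void)
{
        unsigned short init_vals[2] = { 1, 0 };
        struct sembuf down0 = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
        struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };
        union semun arg;
        int id;

        /* semget() -> newary(): create a private set of two semaphores. */
        id = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
        if (id < 0) {
                perror("semget");
                return 1;
        }

        /* semctl(SETALL) -> semctl_main(): initialise both semaphore values. */
        arg.array = init_vals;
        if (semctl(id, 0, SETALL, arg) < 0)
                perror("semctl SETALL");

        /*
         * semtimedop() with SEM_UNDO -> sys_semtimedop()/find_undo(): the undo
         * adjustment recorded here is what exit_sem() replays (and clamps to
         * the [0, SEMVMX] range) if the process exits without undoing it.
         */
        if (semtimedop(id, &down0, 1, &timeout) < 0)
                perror("semtimedop");

        /* semctl(IPC_RMID) -> semctl_down()/freeary(): tear the set down. */
        if (semctl(id, 0, IPC_RMID) < 0)
                perror("semctl IPC_RMID");

        return 0;
}

While the program holds the semaphore set, the new seq_file-based /proc/sysvipc/sem output introduced by this patch lists it with the key, semid, perms, nsems, uid/gid and time columns shown in sysvipc_sem_proc_show().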