X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ipc%2Fsem.c;h=416d06aff20e4db0bab8f3a4df642e6d4b9510e3;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=3725673556848f91900c9778c6fce4896fec7013;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/ipc/sem.c b/ipc/sem.c
index 372567355..416d06aff 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -71,6 +71,7 @@
 #include
 #include
 #include
+#include
 #include
 #include "util.h"
 
@@ -116,7 +117,7 @@ void __init sem_init (void)
 	ipc_init_ids(&sem_ids,sc_semmni);
 
 #ifdef CONFIG_PROC_FS
-	create_proc_read_entry("sysvipc/sem", 0, 0, sysvipc_sem_read_proc, NULL);
+	create_proc_read_entry("sysvipc/sem", 0, NULL, sysvipc_sem_read_proc, NULL);
 #endif
 }
 
@@ -175,18 +176,19 @@ static int newary (key_t key, int nsems, int semflg)
 
 	sma->sem_perm.mode = (semflg & S_IRWXUGO);
 	sma->sem_perm.key = key;
+	sma->sem_perm.xid = vx_current_xid();
 
 	sma->sem_perm.security = NULL;
 	retval = security_sem_alloc(sma);
 	if (retval) {
-		ipc_rcu_free(sma, size);
+		ipc_rcu_putref(sma);
 		return retval;
 	}
 
 	id = ipc_addid(&sem_ids, &sma->sem_perm, sc_semmni);
 	if(id == -1) {
 		security_sem_free(sma);
-		ipc_rcu_free(sma, size);
+		ipc_rcu_putref(sma);
 		return -ENOSPC;
 	}
 	used_sems += nsems;
@@ -241,25 +243,6 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
 	return err;
 }
 
-/* doesn't acquire the sem_lock on error! */
-static int sem_revalidate(int semid, struct sem_array* sma, int nsems, short flg)
-{
-	struct sem_array* smanew;
-
-	smanew = sem_lock(semid);
-	if(smanew==NULL)
-		return -EIDRM;
-	if(smanew != sma || sem_checkid(sma,semid) || sma->sem_nsems != nsems) {
-		sem_unlock(smanew);
-		return -EIDRM;
-	}
-
-	if (flg && ipcperms(&sma->sem_perm, flg)) {
-		sem_unlock(smanew);
-		return -EACCES;
-	}
-	return 0;
-}
 /* Manage the doubly linked list sma->sem_pending as a FIFO:
  * insert new queue elements at the tail sma->sem_pending_last.
  */
@@ -376,8 +359,22 @@ static void update_queue (struct sem_array * sma)
 		if (error <= 0) {
 			struct sem_queue *n;
 			remove_from_queue(sma,q);
-			n = q->next;
 			q->status = IN_WAKEUP;
+			/*
+			 * Continue scanning. The next operation
+			 * that must be checked depends on the type of the
+			 * completed operation:
+			 * - if the operation modified the array, then
+			 *   restart from the head of the queue and
+			 *   check for threads that might be waiting
+			 *   for semaphore values to become 0.
+			 * - if the operation didn't modify the array,
+			 *   then just continue.
+			 */
+			if (q->alter)
+				n = sma->sem_pending;
+			else
+				n = q->next;
 			wake_up_process(q->sleeper);
 			/* hands-off: q will disappear immediately after
 			 * writing q->status.
@@ -473,7 +470,7 @@ static void freeary (struct sem_array *sma, int id)
 	used_sems -= sma->sem_nsems;
 	size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem);
 	security_sem_free(sma);
-	ipc_rcu_free(sma, size);
+	ipc_rcu_putref(sma);
 }
 
 static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -542,7 +539,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
 		struct semid64_ds tbuf;
 		int id;
 
-		if(semid >= sem_ids.size)
+		if(semid >= sem_ids.entries->size)
 			return -EINVAL;
 
 		memset(&tbuf,0,sizeof(tbuf));
@@ -610,17 +607,28 @@ static int semctl_main(int semid, int semnum, int cmd, int version, union semun
 	switch (cmd) {
 	case GETALL:
 	{
-		ushort *array = arg.array;
+		ushort __user *array = arg.array;
 		int i;
 
 		if(nsems > SEMMSL_FAST) {
+			ipc_rcu_getref(sma);
 			sem_unlock(sma);
+
 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
-			if(sem_io == NULL)
+			if(sem_io == NULL) {
+				ipc_lock_by_ptr(&sma->sem_perm);
+				ipc_rcu_putref(sma);
+				sem_unlock(sma);
 				return -ENOMEM;
-			err = sem_revalidate(semid, sma, nsems, S_IRUGO);
-			if(err)
+			}
+
+			ipc_lock_by_ptr(&sma->sem_perm);
+			ipc_rcu_putref(sma);
+			if (sma->sem_perm.deleted) {
+				sem_unlock(sma);
+				err = -EIDRM;
 				goto out_free;
+			}
 		}
 
 		for (i = 0; i < sma->sem_nsems; i++)
@@ -636,28 +644,43 @@ static int semctl_main(int semid, int semnum, int cmd, int version, union semun
 		int i;
 		struct sem_undo *un;
 
+		ipc_rcu_getref(sma);
 		sem_unlock(sma);
 
 		if(nsems > SEMMSL_FAST) {
 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
-			if(sem_io == NULL)
+			if(sem_io == NULL) {
+				ipc_lock_by_ptr(&sma->sem_perm);
+				ipc_rcu_putref(sma);
+				sem_unlock(sma);
 				return -ENOMEM;
+			}
 		}
 
 		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
+			ipc_lock_by_ptr(&sma->sem_perm);
+			ipc_rcu_putref(sma);
+			sem_unlock(sma);
 			err = -EFAULT;
 			goto out_free;
 		}
 
 		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
+				ipc_lock_by_ptr(&sma->sem_perm);
+				ipc_rcu_putref(sma);
+				sem_unlock(sma);
 				err = -ERANGE;
 				goto out_free;
 			}
 		}
-		err = sem_revalidate(semid, sma, nsems, S_IWUGO);
-		if(err)
+		ipc_lock_by_ptr(&sma->sem_perm);
+		ipc_rcu_putref(sma);
+		if (sma->sem_perm.deleted) {
+			sem_unlock(sma);
+			err = -EIDRM;
 			goto out_free;
+		}
 
 		for (i = 0; i < nsems; i++)
 			sma->sem_base[i].semval = sem_io[i];
@@ -977,11 +1000,16 @@ static struct sem_undo *find_undo(int semid)
 		goto out;
 	}
 	nsems = sma->sem_nsems;
+	ipc_rcu_getref(sma);
 	sem_unlock(sma);
 
 	new = (struct sem_undo *) kmalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
-	if (!new)
+	if (!new) {
+		ipc_lock_by_ptr(&sma->sem_perm);
+		ipc_rcu_putref(sma);
+		sem_unlock(sma);
 		return ERR_PTR(-ENOMEM);
+	}
 	memset(new, 0, sizeof(struct sem_undo) + sizeof(short)*nsems);
 	new->semadj = (short *) &new[1];
 	new->semid = semid;
@@ -991,13 +1019,18 @@ static struct sem_undo *find_undo(int semid)
 	if (un) {
 		unlock_semundo();
 		kfree(new);
+		ipc_lock_by_ptr(&sma->sem_perm);
+		ipc_rcu_putref(sma);
+		sem_unlock(sma);
 		goto out;
 	}
-	error = sem_revalidate(semid, sma, nsems, 0);
-	if (error) {
+	ipc_lock_by_ptr(&sma->sem_perm);
+	ipc_rcu_putref(sma);
+	if (sma->sem_perm.deleted) {
+		sem_unlock(sma);
 		unlock_semundo();
 		kfree(new);
-		un = ERR_PTR(error);
+		un = ERR_PTR(-EIDRM);
 		goto out;
 	}
 	new->proc_next = ulp->proc_list;
@@ -1101,8 +1134,11 @@ retry_undos:
 		goto out_unlock_free;
 
 	error = try_atomic_semop (sma, sops, nsops, un, current->tgid);
-	if (error <= 0)
-		goto update;
+	if (error <= 0) {
+		if (alter && error == 0)
+			update_queue (sma);
+		goto out_unlock_free;
+	}
 
 	/* We need to sleep on this operation, so we put the current
 	 * task into the pending queue and go to sleep.
@@ -1114,6 +1150,7 @@ retry_undos:
 	queue.undo = un;
 	queue.pid = current->tgid;
 	queue.id = semid;
+	queue.alter = alter;
 	if (alter)
 		append_to_queue(sma ,&queue);
 	else
@@ -1165,9 +1202,6 @@ retry_undos:
 	remove_from_queue(sma,&queue);
 	goto out_unlock_free;
 
-update:
-	if (alter)
-		update_queue (sma);
 out_unlock_free:
 	sem_unlock(sma);
 out_free:
@@ -1269,8 +1303,23 @@ found:
 			struct sem * sem = &sma->sem_base[i];
 			if (u->semadj[i]) {
 				sem->semval += u->semadj[i];
+				/*
+				 * Range checks of the new semaphore value,
+				 * not defined by sus:
+				 * - Some unices ignore the undo entirely
+				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
+				 * - some cap the value (e.g. FreeBSD caps
+				 *   at 0, but doesn't enforce SEMVMX)
+				 *
+				 * Linux caps the semaphore value, both at 0
+				 * and at SEMVMX.
+				 *
+				 *	Manfred
+				 */
 				if (sem->semval < 0)
-					sem->semval = 0; /* shouldn't happen */
+					sem->semval = 0;
+				if (sem->semval > SEMVMX)
+					sem->semval = SEMVMX;
 				sem->sempid = current->tgid;
 			}
 		}
@@ -1296,7 +1345,11 @@ static int sysvipc_sem_read_proc(char *buffer, char **start, off_t offset, int l
 	for(i = 0; i <= sem_ids.max_id; i++) {
 		struct sem_array *sma;
 		sma = sem_lock(i);
-		if(sma) {
+		if (sma) {
+			if (!vx_check(sma->sem_perm.xid, VX_IDENT)) {
+				sem_unlock(sma);
+				continue;
+			}
 			len += sprintf(buffer + len, "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n",
 				sma->sem_perm.key,
 				sem_buildid(i,sma->sem_perm.seq),
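A note on the recurring change in this patch: every path that used to call the removed sem_revalidate() now pins the semaphore array with ipc_rcu_getref() before dropping the lock for a blocking call, then re-acquires the lock with ipc_lock_by_ptr(), drops the extra reference with ipc_rcu_putref(), and checks sma->sem_perm.deleted to detect a concurrent IPC_RMID. A minimal sketch of that pattern follows; the helper name is hypothetical, error handling is reduced to the essentials, and ipc_free() (the counterpart of ipc_alloc() in ipc/util.h) is assumed.

	/*
	 * Sketch only: pin an RCU-managed sem_array across a blocking
	 * allocation.  The caller holds the per-array lock on entry and,
	 * on success, holds it again on return.
	 */
	static int pin_and_alloc(struct sem_array *sma, ushort **bufp, int nsems)
	{
		ushort *buf;

		ipc_rcu_getref(sma);	/* keep sma alive while unlocked */
		sem_unlock(sma);	/* drop the lock; ipc_alloc() may sleep */

		buf = ipc_alloc(sizeof(ushort) * nsems);

		ipc_lock_by_ptr(&sma->sem_perm);	/* relock by pointer, not by id */
		ipc_rcu_putref(sma);			/* drop the extra reference */

		if (buf == NULL) {
			sem_unlock(sma);
			return -ENOMEM;
		}
		if (sma->sem_perm.deleted) {	/* array was removed while we slept */
			sem_unlock(sma);
			ipc_free(buf, sizeof(ushort) * nsems);
			return -EIDRM;
		}
		*bufp = buf;
		return 0;
	}

Compared with the old sem_revalidate(), this avoids looking the id up again and still works after the array has been removed, because the reference keeps the memory valid until ipc_rcu_putref() is called.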
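The update_queue() change concerns sleepers that wait for a semaphore value to reach zero: once a completed operation has altered the array (q->alter), the scan restarts from the head of sma->sem_pending so that such waiters are re-evaluated in the same pass. A small user-space illustration of the two kinds of pending operations involved; this is a hypothetical standalone test, not part of the patch, and error checking is omitted for brevity.

	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/wait.h>
	#include <sys/ipc.h>
	#include <sys/sem.h>

	union semun { int val; };	/* must be defined by the caller on Linux */

	int main(void)
	{
		int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
		union semun arg = { .val = 2 };
		struct sembuf wait_for_zero = { 0,  0, 0 };	/* sleeps until semval == 0 */
		struct sembuf decrement     = { 0, -2, 0 };	/* alters the array */

		semctl(id, 0, SETVAL, arg);
		if (fork() == 0) {
			/* queued on sem_pending while semval is still 2 */
			semop(id, &wait_for_zero, 1);
			_exit(0);
		}
		sleep(1);			/* let the child block first */
		semop(id, &decrement, 1);	/* semval becomes 0; the wait-for-zero
						 * sleeper must now be rechecked */
		wait(NULL);
		semctl(id, 0, IPC_RMID);
		return 0;
	}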