/*
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Sep 1997 - Call suser() last after "normal" permission checks so we
 *            get BSD style process accounting right.
 *            Occurs in several places in the IPC code.
 *            Chris Evans, <chris@ferret.lmh.ox.ac.uk>
 * Nov 1999 - ipc helper functions, unified SMP locking
 *            Manfred Spraul <manfreds@colorfullife.com>
 * Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
 *            Mingming Cao <cmm@us.ibm.com>
 */

#include <linux/config.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/msg.h>
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/highuid.h>
#include <linux/security.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/vs_base.h>

#include <asm/unistd.h>

/**
 * ipc_init - initialise IPC subsystem
 *
 * The various system5 IPC resources (semaphores, messages and shared
 * memory) are initialised.
 */
static int __init ipc_init(void)
{
        sem_init();
        msg_init();
        shm_init();
        return 0;
}
__initcall(ipc_init);

/**
 * ipc_init_ids - initialise IPC identifiers
 * @ids: Identifier set
 * @size: Number of identifiers
 *
 * Given a size for the ipc identifier range (limited below IPCMNI)
 * set up the sequence range to use then allocate and initialise the
 * array itself.
 */
void __init ipc_init_ids(struct ipc_ids* ids, int size)
{
        int i;
        sema_init(&ids->sem,1);

        if(size > IPCMNI)
                size = IPCMNI;
        ids->in_use = 0;
        ids->max_id = -1;
        ids->seq = 0;
        {
                int seq_limit = INT_MAX/SEQ_MULTIPLIER;
                if(seq_limit > USHRT_MAX)
                        ids->seq_max = USHRT_MAX;
                else
                        ids->seq_max = seq_limit;
        }
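
        /*
         * seq_max computed above bounds the per-slot sequence number
         * (see ipc_addid() and ipc_buildid() below) so that
         * SEQ_MULTIPLIER*seq + id never overflows a positive int.
         */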

        ids->entries = ipc_rcu_alloc(sizeof(struct ipc_id)*size);

        if(ids->entries == NULL) {
                printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n");
                size = 0;
        }
        ids->size = size;
        for(i=0;i<ids->size;i++)
                ids->entries[i].p = NULL;
}

/**
 * ipc_findkey - find a key in an ipc identifier set
 * @ids: Identifier set
 * @key: The key to find
 *
 * Requires ipc_ids.sem locked.
 * Returns the identifier if found or -1 if not.
 */
int ipc_findkey(struct ipc_ids* ids, key_t key)
{
        int id;
        struct kern_ipc_perm* p;
        int max_id = ids->max_id;

        /*
         * rcu_dereference() is not needed here
         * since ipc_ids.sem is held
         */
        for (id = 0; id <= max_id; id++) {
                p = ids->entries[id].p;
                if (p == NULL)
                        continue;
                if (!vx_check(p->xid, VX_IDENT))
                        continue;
                if (key == p->key)
                        return id;
        }
        return -1;
}

/*
 * Requires ipc_ids.sem locked
 */
static int grow_ary(struct ipc_ids* ids, int newsize)
{
        struct ipc_id* new;
        struct ipc_id* old;
        int i;

        if(newsize > IPCMNI)
                newsize = IPCMNI;
        if(newsize <= ids->size)
                return newsize;

        new = ipc_rcu_alloc(sizeof(struct ipc_id)*newsize);
        if(new == NULL)
                return ids->size;
        memcpy(new, ids->entries, sizeof(struct ipc_id)*ids->size);
        for(i=ids->size;i<newsize;i++) {
                new[i].p = NULL;
        }
        old = ids->entries;

        /*
         * Before setting ids->entries to the new array, there must be a
         * smp_wmb() to make sure the memcpyed contents of the new array are
         * visible before the new array becomes visible.
         */
        smp_wmb();      /* prevent seeing new array uninitialized. */
        ids->entries = new;
        smp_wmb();      /* prevent indexing into old array based on new size. */
        ids->size = newsize;

        ipc_rcu_putref(old);
        return ids->size;
}
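
/*
 * The two smp_wmb()s above pair with the smp_rmb()/rcu_dereference()
 * sequence on the read side in ipc_lock(): a reader that sees the new
 * ids->size must also see the new ids->entries pointer, and a reader
 * that sees the new pointer must also see the memcpyed contents.
 */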

/**
 * ipc_addid - add an IPC identifier
 * @ids: IPC identifier set
 * @new: new IPC permission set
 * @size: new size limit for the id array
 *
 * Add an entry 'new' to the IPC arrays. The permissions object is
 * initialised and the first free entry is set up and the id assigned
 * is returned. The list is returned in a locked state on success.
 * On failure the list is not locked and -1 is returned.
 *
 * Called with ipc_ids.sem held.
 */
int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
{
        int id;

        size = grow_ary(ids,size);

        /*
         * rcu_dereference() is not needed here since
         * ipc_ids.sem is held
         */
        for (id = 0; id < size; id++) {
                if(ids->entries[id].p == NULL)
                        goto found;
        }
        return -1;
found:
        ids->in_use++;
        if (id > ids->max_id)
                ids->max_id = id;

        new->cuid = new->uid = current->euid;
        new->gid = new->cgid = current->egid;

        new->seq = ids->seq++;
        if(ids->seq > ids->seq_max)
                ids->seq = 0;

        new->lock = SPIN_LOCK_UNLOCKED;
        new->deleted = 0;
        rcu_read_lock();
        spin_lock(&new->lock);
        ids->entries[id].p = new;
        return id;
}

/**
 * ipc_rmid - remove an IPC identifier
 * @ids: identifier set
 * @id: Identifier to remove
 *
 * The identifier must be valid, and in use. The kernel will panic if
 * fed an invalid identifier. The entry is removed and internal
 * variables recomputed. The object associated with the identifier
 * is returned.
 * ipc_ids.sem and the spinlock for this ID are held before this function
 * is called, and remain locked on exit.
 */
struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
{
        struct kern_ipc_perm* p;
        int lid = id % SEQ_MULTIPLIER;
        if(lid >= ids->size)
                BUG();

        /*
         * rcu_dereference() is not needed here to force ordering
         * on Alpha, since the ipc_ids.sem is held.
         */
        p = ids->entries[lid].p;
        ids->entries[lid].p = NULL;
        if(p == NULL)
                BUG();
        ids->in_use--;

        if (lid == ids->max_id) {
                do {
                        lid--;
                        if(lid == -1)
                                break;
                } while (ids->entries[lid].p == NULL);
                ids->max_id = lid;
        }
        p->deleted = 1;
        return p;
}

/**
 * ipc_alloc - allocate ipc space
 * @size: size desired
 *
 * Allocate memory from the appropriate pools and return a pointer to it.
 * NULL is returned if the allocation fails
 */
void* ipc_alloc(int size)
{
        void* out;
        if(size > PAGE_SIZE)
                out = vmalloc(size);
        else
                out = kmalloc(size, GFP_KERNEL);
        return out;
}

/**
 * ipc_free - free ipc space
 * @ptr: pointer returned by ipc_alloc
 * @size: size of block
 *
 * Free a block created with ipc_alloc. The caller must know the size
 * used in the allocation call.
 */
void ipc_free(void* ptr, int size)
{
        if(size > PAGE_SIZE)
                vfree(ptr);
        else
                kfree(ptr);
}

/*
 * There are three headers that are prepended to the actual allocation:
 * - during use: ipc_rcu_hdr.
 * - during the rcu grace period: ipc_rcu_grace.
 * - [only if vmalloc]: ipc_rcu_sched.
 * Their lifetime doesn't overlap, thus the headers share the same memory.
 * Unlike a normal union, they are right-aligned, thus some container_of
 * forward/backward casting is necessary:
 */
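
/*
 * Layout: each header ends immediately before data[0], so the pointer
 * handed out by ipc_rcu_alloc() points just past whichever header is
 * currently live:
 *
 *      allocation start                    pointer seen by callers
 *      v                                   v
 *      [ ipc_rcu_hdr / ipc_rcu_grace / ..][ object ]
 *
 * container_of(ptr, struct ipc_rcu_hdr, data) therefore walks backwards
 * from the object to the live header.
 */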
struct ipc_rcu_hdr
{
        int refcount;
        int is_vmalloc;
        void *data[0];
};

struct ipc_rcu_grace
{
        struct rcu_head rcu;
        /* "void *" makes sure alignment of following data is sane. */
        void *data[0];
};

struct ipc_rcu_sched
{
        struct work_struct work;
        /* "void *" makes sure alignment of following data is sane. */
        void *data[0];
};

#define HDRLEN_KMALLOC          (sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
                                        sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
#define HDRLEN_VMALLOC          (sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
                                        sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)

static inline int rcu_use_vmalloc(int size)
{
        /* Too big for a single page? */
        if (HDRLEN_KMALLOC + size > PAGE_SIZE)
                return 1;
        return 0;
}

/**
 * ipc_rcu_alloc - allocate ipc and rcu space
 * @size: size desired
 *
 * Allocate memory for the rcu header structure + the object.
 * Returns the pointer to the object.
 * NULL is returned if the allocation fails.
 */
void* ipc_rcu_alloc(int size)
{
        void* out;
        /*
         * We prepend the allocation with the rcu struct, and
         * workqueue if necessary (for vmalloc).
         */
        if (rcu_use_vmalloc(size)) {
                out = vmalloc(HDRLEN_VMALLOC + size);
                if (out) {
                        out += HDRLEN_VMALLOC;
                        container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
                        container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
                }
        } else {
                out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
                if (out) {
                        out += HDRLEN_KMALLOC;
                        container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
                        container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
                }
        }

        return out;
}

void ipc_rcu_getref(void *ptr)
{
        container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
}

/**
 * ipc_schedule_free - free ipc + rcu space
 * @head: RCU callback structure for queued work
 *
 * Since the RCU callback is run in bh (softirq) context,
 * we must defer the vfree() via schedule_work().
 */
static void ipc_schedule_free(struct rcu_head *head)
{
        struct ipc_rcu_grace *grace =
                container_of(head, struct ipc_rcu_grace, rcu);
        struct ipc_rcu_sched *sched =
                container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);

        INIT_WORK(&sched->work, vfree, sched);
        schedule_work(&sched->work);
}

/**
 * ipc_immediate_free - free ipc + rcu space
 * @head: RCU callback structure that contains the pointer to be freed
 *
 * Free from the RCU callback context
 */
static void ipc_immediate_free(struct rcu_head *head)
{
        struct ipc_rcu_grace *free =
                container_of(head, struct ipc_rcu_grace, rcu);
        kfree(free);
}

void ipc_rcu_putref(void *ptr)
{
        if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
                return;

        if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
                call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
                                ipc_schedule_free);
        } else {
                call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
                                ipc_immediate_free);
        }
}
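
/*
 * Note the two free paths above: a kmalloc'd object can be kfree'd
 * directly from the RCU callback (ipc_immediate_free), but a vmalloc'd
 * object must bounce through a workqueue (ipc_schedule_free), since
 * vfree() may sleep and RCU callbacks run in softirq context.
 */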

/**
 * ipcperms - check IPC permissions
 * @ipcp: IPC permission set
 * @flag: desired permission set.
 *
 * Check user, group, other permissions for access
 * to ipc resources. Return 0 if allowed.
 */
int ipcperms (struct kern_ipc_perm *ipcp, short flag)
{       /* flag will most probably be 0 or S_...UGO from <linux/stat.h> */
        int requested_mode, granted_mode;

        if (!vx_check(ipcp->xid, VX_ADMIN|VX_IDENT)) /* maybe just VX_IDENT? */
                return -1;
        requested_mode = (flag >> 6) | (flag >> 3) | flag;
        granted_mode = ipcp->mode;
        if (current->euid == ipcp->cuid || current->euid == ipcp->uid)
                granted_mode >>= 6;
        else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
                granted_mode >>= 3;
        /* is there some bit set in requested_mode but not in granted_mode? */
        if ((requested_mode & ~granted_mode & 0007) &&
            !capable(CAP_IPC_OWNER))
                return -1;

        return security_ipc_permission(ipcp, flag);
}
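
/*
 * Example: flag = S_IRUGO|S_IWUGO (0666) folds down to requested_mode = 06
 * in the low three bits; for the owner, granted_mode is shifted right by
 * six so that the owner bits land in the same position, and the final
 * test compares only bits 0007.
 */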

/*
 * Functions to convert between the kern_ipc_perm structure and the
 * old/new ipc_perm structures
 */

/**
 * kernel_to_ipc64_perm - convert kernel ipc permissions to user
 * @in: kernel permissions
 * @out: new style IPC permissions
 *
 * Turn the kernel object 'in' into a set of permissions descriptions
 * for returning to userspace (out).
 */
void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
{
        out->key = in->key;
        out->uid = in->uid;
        out->gid = in->gid;
        out->cuid = in->cuid;
        out->cgid = in->cgid;
        out->mode = in->mode;
        out->seq = in->seq;
}

/**
 * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
 * @in: new style IPC permissions
 * @out: old style IPC permissions
 *
 * Turn the new style permissions object 'in' into a compatibility
 * object and store it into the 'out' pointer.
 */
void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
{
        out->key = in->key;
        SET_UID(out->uid, in->uid);
        SET_GID(out->gid, in->gid);
        SET_UID(out->cuid, in->cuid);
        SET_GID(out->cgid, in->cgid);
        out->mode = in->mode;
        out->seq = in->seq;
}

/*
 * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get()
 * is called with shm_ids.sem locked. Since grow_ary() is also called with
 * shm_ids.sem down (for shared memory), there is no need to add read
 * barriers here to guarantee the writes in grow_ary() are seen in order
 * here (for Alpha).
 *
 * However ipc_get() itself does not necessarily require ipc_ids.sem down. So
 * if in the future ipc_get() is used by other places without ipc_ids.sem
 * down, then ipc_get() needs read memory barriers as ipc_lock() does.
 */
struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
{
        struct kern_ipc_perm* out;
        int lid = id % SEQ_MULTIPLIER;
        if(lid >= ids->size)
                return NULL;
        out = ids->entries[lid].p;
        return out;
}

struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
{
        struct kern_ipc_perm* out;
        int lid = id % SEQ_MULTIPLIER;
        struct ipc_id* entries;

        rcu_read_lock();
        if(lid >= ids->size) {
                rcu_read_unlock();
                return NULL;
        }

        /*
         * Note: The following two read barriers correspond
         * to the two write barriers in grow_ary(). They guarantee
         * the writes are seen in the same order on the read side.
         * smp_rmb() has effect on all CPUs. rcu_dereference()
         * is used if there is a data dependency between two reads, and
         * has effect only on Alpha.
         */
        smp_rmb(); /* prevent indexing old array with new size */
        entries = rcu_dereference(ids->entries);
        out = entries[lid].p;
        if(out == NULL) {
                rcu_read_unlock();
                return NULL;
        }
        spin_lock(&out->lock);

        /* ipc_rmid() may have already freed the ID while ipc_lock
         * was spinning: here verify that the structure is still valid
         */
        if (out->deleted) {
                spin_unlock(&out->lock);
                rcu_read_unlock();
                return NULL;
        }
        return out;
}

void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
{
        rcu_read_lock();
        spin_lock(&perm->lock);
}

void ipc_unlock(struct kern_ipc_perm* perm)
{
        spin_unlock(&perm->lock);
        rcu_read_unlock();
}
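
/*
 * A successful ipc_lock() (or ipc_lock_by_ptr()) leaves both the RCU read
 * lock and the per-id spinlock held, so it must always be paired with
 * ipc_unlock(), e.g.:
 *
 *      struct kern_ipc_perm *p = ipc_lock(&shm_ids, id);
 *      if (p != NULL) {
 *              ... use *p ...
 *              ipc_unlock(p);
 *      }
 */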

int ipc_buildid(struct ipc_ids* ids, int id, int seq)
{
        return SEQ_MULTIPLIER*seq + id;
}

int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid)
{
        if(uid/SEQ_MULTIPLIER != ipcp->seq)
                return 1;
        return 0;
}
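
/*
 * Example of the id encoding: with SEQ_MULTIPLIER == IPCMNI (32768), a
 * slot lid = 5 whose current sequence number is seq = 3 yields the
 * userspace id 3*32768 + 5 = 98309. ipc_checkid() verifies that
 * 98309/32768 == ipcp->seq, while ipc_lock() and friends recover the slot
 * as 98309 % SEQ_MULTIPLIER == 5, so an id kept from an earlier
 * incarnation of the slot fails the check.
 */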

#ifdef __ARCH_WANT_IPC_PARSE_VERSION

/**
 * ipc_parse_version - IPC call version
 * @cmd: pointer to command
 *
 * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
 * The cmd value is changed from an encoded command and version into
 * just the command code.
 */
int ipc_parse_version (int *cmd)
{
        if (*cmd & IPC_64) {
                *cmd ^= IPC_64;
                return IPC_64;
        } else {
                return IPC_OLD;
        }
}
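
/*
 * For example, semctl(id, 0, IPC_STAT|IPC_64, arg) arrives with the
 * version bit set: ipc_parse_version() clears IPC_64 from *cmd (leaving
 * IPC_STAT) and returns IPC_64, telling the caller to use the new-style
 * structure layout.
 */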

#endif  /* __ARCH_WANT_IPC_PARSE_VERSION */