/*
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/proc_fs.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/ptrace.h>
#include <linux/vs_limit.h>

#include <asm/uaccess.h>
#define shm_flags	shm_perm.mode

static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids shm_ids;

#define shm_lock(id)	((struct shmid_kernel*)ipc_lock(&shm_ids,id))
#define shm_unlock(shp)	ipc_unlock(&(shp)->shm_perm)
#define shm_get(id)	((struct shmid_kernel*)ipc_get(&shm_ids,id))
#define shm_buildid(id, seq) \
	ipc_buildid(&shm_ids, id, seq)
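/*
 * A worked example of the id layout (a sketch, assuming the usual
 * SEQ_MULTIPLIER == IPCMNI == 32768 from ipc/util.h): ipc_buildid()
 * computes SEQ_MULTIPLIER * seq + id, so the segment in slot 1 whose
 * sequence counter is 3 is reported to userspace as shmid
 * 3 * 32768 + 1 == 98305. shm_checkid() below uses the same arithmetic
 * to reject stale ids whose sequence number no longer matches the
 * slot's current occupant.
 */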
static int newseg (key_t key, int shmflg, size_t size);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);

size_t	shm_ctlmax = SHMMAX;
size_t	shm_ctlall = SHMALL;
int	shm_ctlmni = SHMMNI;

static int shm_tot; /* total number of shared memory pages */
void __init shm_init (void)
{
	ipc_init_ids(&shm_ids, 1);
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("sysvipc/shm", 0, NULL, sysvipc_shm_read_proc, NULL);
#endif
}
static inline int shm_checkid(struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids,&s->shm_perm,id))
		return -EIDRM;
	return 0;
}

static inline struct shmid_kernel *shm_rmid(int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids,id);
}

static inline int shm_addid(struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni);
}

static inline void shm_inc (int id) {
	struct shmid_kernel *shp;

	if(!(shp = shm_lock(id)))
		BUG();
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}
/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
	shm_inc (shmd->vm_file->f_dentry->d_inode->i_ino);
}
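/*
 * Userspace illustration (a minimal sketch, not part of this file; error
 * handling elided): every mapping of a segment bumps shm_nattch through
 * shm_inc() - the parent's shmat() via shm_mmap(), and the child's
 * inherited vma via this shm_open() callback on fork().
 *
 *	#include <sys/shm.h>
 *	#include <unistd.h>
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);	// shm_nattch == 1
 *	if (fork() == 0)		// child inherits the mapping,
 *		_exit(0);		// shm_open() raised shm_nattch to 2;
 *					// exiting unmaps it and shm_close()
 *					// drops the count back to 1
 */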
/**
 * shm_destroy - free the struct shmid_kernel
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.sem locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy (struct shmid_kernel *shp)
{
	struct vx_info *vxi = locate_vx_info(shp->shm_perm.xid);
	int numpages = (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;

	vx_ipcshm_sub(vxi, shp, numpages);
	shm_tot -= numpages;
	shm_rmid (shp->id);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size,
				shp->mlock_user);
	fput (shp->shm_file);
	security_shm_free(shp);
	put_vx_info(vxi);
	ipc_rcu_putref(shp);
}
/*
 * Remove the attach descriptor shmd.
 * Free memory for the segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
	struct file * file = shmd->vm_file;
	int id = file->f_dentry->d_inode->i_ino;
	struct shmid_kernel *shp;

	down (&shm_ids.sem);
	/* remove from the list of attaches of the shm segment */
	if(!(shp = shm_lock(id)))
		BUG();
	shp->shm_lprid = current->tgid;
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_flags & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	up (&shm_ids.sem);
}
static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	file_accessed(file);
	vma->vm_ops = &shm_vm_ops;
	shm_inc(file->f_dentry->d_inode->i_ino);
	return 0;
}

static struct file_operations shm_file_operations = {
	.mmap	= shm_mmap,
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.nopage	= shmem_nopage,
	.set_policy = shmem_set_policy,
	.get_policy = shmem_get_policy,
};
static int newseg (key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;

	if (size < SHMMIN || size > shm_ctlmax)
		return -EINVAL;
	if (shm_tot + numpages >= shm_ctlall)
		return -ENOSPC;
	if (!vx_ipcshm_avail(current->vx_info, numpages))
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	shp->shm_perm.key = key;
	shp->shm_perm.xid = vx_current_xid();
	shp->shm_flags = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_zero_setup takes care of mlock user accounting */
		file = hugetlb_zero_setup(size);
		shp->mlock_user = current->user;
	} else {
		sprintf (name, "SYSV%08x", key);
		file = shmem_file_setup(name, size, VM_ACCOUNT);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = shm_addid(shp);
	if (id == -1)
		goto no_id;

	shp->shm_cprid = current->tgid;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->id = shm_buildid(id,shp->shm_perm.seq);
	shp->shm_file = file;
	file->f_dentry->d_inode->i_ino = shp->id;
	if (shmflg & SHM_HUGETLB)
		set_file_hugepages(file);
	else
		file->f_op = &shm_file_operations;
	shm_tot += numpages;
	vx_ipcshm_add(current->vx_info, key, numpages);
	shm_unlock(shp);
	return shp->id;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}
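/*
 * Worked example for the limit checks in newseg() above (assuming 4 KiB
 * pages, i.e. PAGE_SHIFT == 12): a request of size = 1000000 bytes becomes
 * numpages = (1000000 + 4095) >> 12 = 245 pages; the call fails with
 * -EINVAL if size lies outside [SHMMIN, shm_ctlmax] and with -ENOSPC once
 * shm_tot + numpages would reach shm_ctlall.
 */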
asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;

	if (key == IPC_PRIVATE) {
		err = newseg(key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(id);
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else {
			int shmid = shm_buildid(id, shp->shm_perm.seq);
			err = security_shm_associate(shp, shmflg);
		}
	}
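/*
 * Userspace usage sketch (illustrative; error handling elided): the three
 * branches above map onto the classic shmget() idioms.
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *
 *	int a = shmget(IPC_PRIVATE, 65536, IPC_CREAT | 0600); // always new
 *	int b = shmget(0x1234, 65536, IPC_CREAT | IPC_EXCL | 0600);
 *	int c = shmget(0x1234, 65536, 0600);	// finds b, so c == b
 *	// repeating the IPC_CREAT|IPC_EXCL call for key 0x1234 now
 *	// fails with errno == EEXIST
 */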
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	}
}
static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_flags;
		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_flags;
		return 0;
	    }
	}
}
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if(in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;
		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;
		return copy_to_user(buf, &out, sizeof(out));
	    }
	}
}
static void shm_get_stat(unsigned long *rss, unsigned long *swp)
{
	int i;

	*rss = 0;
	*swp = 0;

	for (i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel *shp;

		inode = shp->shm_file->f_dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}
	}
}
asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		memset(&shminfo,0,sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
		shminfo.shmmax = shm_ctlmax;
		shminfo.shmall = shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if(copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		memset(&shm_info,0,sizeof(shm_info));
		down(&shm_ids.sem);
		shm_info.used_ids = shm_ids.in_use;
		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids.max_id;
		up(&shm_ids.sem);
		if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
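/*
 * Userspace sketch (illustrative): IPC_INFO reports the configured limits,
 * SHM_INFO the live usage gathered by shm_get_stat(); on success both
 * return the highest used slot index (or 0) rather than zero.
 *
 *	struct shminfo64 limits;
 *	struct shm_info usage;
 *	int max_id = shmctl(0, IPC_INFO, (struct shmid_ds *)&limits);
 *	max_id = shmctl(0, SHM_INFO, (struct shmid_ds *)&usage);
 *	// usage.shm_rss, usage.shm_swp and usage.shm_tot are in pages
 */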
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(shmid);
		if(shp==NULL) {
			err = -EINVAL;
			goto out;
		} else if(cmd==SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids.max_id)
				goto out_unlock;
			result = shm_buildid(shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(shp,shmid);
			if(err)
				goto out_unlock;
			result = 0;
		}
		err = -EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		if (!is_file_hugepages(shp->shm_file))
			tbuf.shm_nattch = shp->shm_nattch;
		else
			tbuf.shm_nattch = file_count(shp->shm_file) - 1;
		if(copy_shmid_to_user (buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
	}
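/*
 * Userspace sketch (illustrative): SHM_STAT takes a slot index and returns
 * the full shmid built via shm_buildid(), which ordinary IPC_STAT calls
 * then accept; iterating 0..max_id enumerates all segments.
 *
 *	struct shmid_ds ds;
 *	for (int i = 0; i <= max_id; i++) {
 *		int id = shmctl(i, SHM_STAT, &ds);
 *		if (id < 0)
 *			continue;	// empty slot
 *		// id is now usable with shmctl(id, IPC_STAT, &ds) etc.
 *	}
 */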
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock(shmid);
		err = shm_checkid(shp,shmid);
		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if(cmd==SHM_LOCK) {
			struct user_struct * user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err) {
					shp->shm_flags |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_flags &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
	}
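/*
 * Userspace sketch (illustrative): SHM_LOCK pins the segment's pages into
 * memory; per the checks above the caller needs CAP_IPC_LOCK, or must own
 * the segment and (for SHM_LOCK) have a nonzero RLIMIT_MEMLOCK.
 *
 *	if (shmctl(id, SHM_LOCK, NULL) == 0) {
 *		// IPC_STAT now shows SHM_LOCKED in shm_perm.mode
 *		shmctl(id, SHM_UNLOCK, NULL);
 *	}
 */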
	case IPC_RMID:
	{
		/*
		 * We cannot simply remove the file. The SVID states
		 * that the block remains until the last person
		 * detaches from it, then is deleted. A shmat() on
		 * an RMID segment is legal in older Linux and if
		 * we change it apps break...
		 *
		 * Instead we set a destroyed flag, and then blow
		 * the name away when the usage hits zero.
		 */
		shp = shm_lock(shmid);
		err = shm_checkid(shp, shmid);
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (shp->shm_nattch){
			shp->shm_flags |= SHM_DEST;
			/* Do not find it any more */
			shp->shm_perm.key = IPC_PRIVATE;
			shm_unlock(shp);
		} else
			shm_destroy (shp);
	}
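/*
 * Userspace sketch (illustrative) of the deferred-destroy semantics
 * described above: after IPC_RMID the key is gone, but existing attaches
 * keep working until the last detach finally triggers shm_destroy().
 *
 *	void *p = shmat(id, NULL, 0);
 *	shmctl(id, IPC_RMID, NULL);	// marked SHM_DEST, key now private
 *	((char *)p)[0] = 1;		// still mapped, still valid
 *	shmdt(p);			// last detach: segment freed here
 */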
	case IPC_SET:
	{
		if (copy_shmid_from_user (&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode)))
			return err;
		shp = shm_lock(shmid);
		err = shm_checkid(shp,shmid);
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_flags = (shp->shm_flags & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}
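/*
 * Userspace sketch (illustrative): IPC_SET only updates uid, gid and the
 * low nine permission bits, as the S_IRWXUGO masking above shows.
 *
 *	struct shmid_ds ds;
 *	shmctl(id, IPC_STAT, &ds);
 *	ds.shm_perm.mode = 0640;
 *	shmctl(id, IPC_SET, &ds);	// also refreshes shm_ctime
 */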
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr, size, flags, prot;
	unsigned long o_flags;
	int acc_mode, err;
	struct file * file;
	void *user_addr;

	if (shmid < 0) {
		err = -EINVAL;
		goto out;
	} else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	/* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					return -EINVAL;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			return -EINVAL;
		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		o_flags = O_RDONLY;
		acc_mode = S_IRUGO;
	} else {
		prot = PROT_READ | PROT_WRITE;
		o_flags = O_RDWR;
		acc_mode = S_IRUGO | S_IWUGO;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	shp = shm_lock(shmid);
	err = shm_checkid(shp,shmid);
	if (ipcperms(&shp->shm_perm, acc_mode)) {
		shm_unlock(shp);
		err = -EACCES;
		goto out;
	}

	err = security_shm_shmat(shp, shmaddr, shmflg);
	file = shp->shm_file;
	size = i_size_read(file->f_dentry->d_inode);
	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		user_addr = ERR_PTR(-EINVAL);
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = (void*) do_mmap (file, addr, size, prot, flags, 0);

invalid:
	up_write(&current->mm->mmap_sem);
	if(!(shp = shm_lock(shmid)))
		BUG();
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_flags & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	*raddr = (unsigned long) user_addr;
	err = 0;
	if (IS_ERR(user_addr))
		err = PTR_ERR(user_addr);
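/*
 * Userspace sketch (illustrative) of the flag handling above: SHM_RND
 * rounds an unaligned hint down to an SHMLBA boundary, SHM_RDONLY maps the
 * segment PROT_READ-only, and without SHM_REMAP a hint that intersects an
 * existing mapping fails with EINVAL.
 *
 *	char *hint = (char *)0x40001234;	// deliberately unaligned
 *	void *p = shmat(id, hint, SHM_RND | SHM_RDONLY);
 *	// on success, p == (void *)((unsigned long)hint & ~(SHMLBA - 1))
 */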
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	force_successful_syscall_return();
/*
 * Detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = vma->vm_file->f_dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			break;
		}
		vma = next;
	}
	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}
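/*
 * Userspace sketch (illustrative) of the fragment handling above: even if
 * the attached mapping was split by a later munmap(), shmdt() on the
 * original attach address takes down all remaining pieces.
 *
 *	char *p = shmat(id, NULL, 0);	// say the segment spans 16 pages
 *	munmap(p + 4096, 4096);		// punch a hole: two vmas remain
 *	shmdt(p);			// both fragments are unmapped
 */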
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	len += sprintf(buffer, "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n");

	for(i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel* shp;

		shp = shm_lock(i);
		if(shp!=NULL) {
#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
			char *format;

			if (!vx_check(shp->shm_perm.xid, VX_IDENT)) {
				shm_unlock(shp);
				continue;
			}
			if (sizeof(size_t) <= sizeof(int))
				format = SMALL_STRING;
			else
				format = BIG_STRING;
			len += sprintf(buffer + len, format,
				shp->shm_perm.key,
				shm_buildid(i, shp->shm_perm.seq),
				is_file_hugepages(shp->shm_file) ? (file_count(shp->shm_file) - 1) : shp->shm_nattch,
			if(pos > offset + length)
				goto done;
		}
	}
done:
	*start = buffer + (offset - begin);
	len -= (offset - begin);
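/*
 * Userspace sketch (illustrative): the output produced above is line
 * oriented and matches the printed header, so it can be consumed with
 * fscanf() after skipping the first line.
 *
 *	#include <stdio.h>
 *
 *	FILE *f = fopen("/proc/sysvipc/shm", "r");
 *	int key, shmid;
 *	unsigned int perms;
 *	unsigned long size;
 *	fscanf(f, "%*[^\n]");		// skip the header line
 *	while (fscanf(f, "%d %d %o %lu %*[^\n]",
 *		      &key, &shmid, &perms, &size) == 4)
 *		printf("shmid %d: %lu bytes\n", shmid, size);
 *	fclose(f);
 */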