2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2001 MIPS Technologies, Inc.
10 #include <linux/config.h>
11 #include <linux/a.out.h>
12 #include <linux/capability.h>
13 #include <linux/errno.h>
14 #include <linux/linkage.h>
16 #include <linux/smp.h>
17 #include <linux/smp_lock.h>
18 #include <linux/mman.h>
19 #include <linux/ptrace.h>
20 #include <linux/sched.h>
21 #include <linux/string.h>
22 #include <linux/syscalls.h>
23 #include <linux/file.h>
24 #include <linux/slab.h>
25 #include <linux/utsname.h>
26 #include <linux/unistd.h>
27 #include <linux/sem.h>
28 #include <linux/msg.h>
29 #include <linux/shm.h>
30 #include <linux/compiler.h>
31 #include <linux/module.h>
32 #include <linux/vs_cvirt.h>
34 #include <asm/branch.h>
35 #include <asm/cachectl.h>
36 #include <asm/cacheflush.h>
38 #include <asm/asm-offsets.h>
39 #include <asm/signal.h>
41 #include <asm/shmparam.h>
42 #include <asm/sysmips.h>
43 #include <asm/uaccess.h>
/*
 * sys_pipe: pipe(2) entry point.  Takes the saved pt_regs because MIPS
 * presumably hands the second fd back in a result register rather than
 * via a user pointer — body not visible in this excerpt, confirm there.
 */
45 asmlinkage int sys_pipe(nabi_no_regargs volatile struct pt_regs regs)
/*
 * Minimum alignment for shared mappings, used to avoid virtual-cache
 * aliasing.  PAGE_SIZE - 1 imposes no extra constraint ("sane caches");
 * presumably widened at boot on aliasing caches — not shown in this
 * excerpt, TODO confirm.
 */
61 unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
63 EXPORT_SYMBOL(shm_align_mask);
/*
 * COLOUR_ALIGN(addr, pgoff): round addr up to the next shm_align_mask
 * boundary, then add pgoff's byte offset within the alignment window so
 * the chosen virtual address has the same cache colour as the file
 * offset being mapped.
 */
65 #define COLOUR_ALIGN(addr,pgoff) \
66 ((((addr) + shm_align_mask) & ~shm_align_mask) + \
67 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
/*
 * arch_get_unmapped_area: choose an address for a new mapping.  Shared
 * or file-backed mappings get colour-aligned (COLOUR_ALIGN) so virtual
 * aliases share a cache colour; MAP_FIXED requests are only checked
 * against the aliasing constraint.
 * NOTE(review): numerous original lines (returns, braces, else
 * branches) are missing from this excerpt; code kept verbatim below.
 */
69 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
70 unsigned long len, unsigned long pgoff, unsigned long flags)
72 struct vm_area_struct * vmm;
74 unsigned long task_size;
76 task_size = STACK_TOP;
78 if (flags & MAP_FIXED) {
80 * We do not accept a shared mapping if it would violate
81 * cache aliasing constraints.
83 if ((flags & MAP_SHARED) && (addr & shm_align_mask))
/* Caller supplied a hint: honour it if the aligned range is free. */
91 if (filp || (flags & MAP_SHARED))
95 addr = COLOUR_ALIGN(addr, pgoff);
97 addr = PAGE_ALIGN(addr);
98 vmm = find_vma(current->mm, addr);
99 if (task_size - len >= addr &&
100 (!vmm || addr + len <= vmm->vm_start))
/* No usable hint: linear first-fit scan from TASK_UNMAPPED_BASE. */
103 addr = TASK_UNMAPPED_BASE;
105 addr = COLOUR_ALIGN(addr, pgoff);
107 addr = PAGE_ALIGN(addr);
109 for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
110 /* At this point: (!vmm || addr < vmm->vm_end). */
111 if (task_size - len < addr)
113 if (!vmm || addr + len <= vmm->vm_start)
117 addr = COLOUR_ALIGN(addr, pgoff);
121 /* common code for old and new mmaps */
122 static inline unsigned long
123 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
124 unsigned long flags, unsigned long fd, unsigned long pgoff)
126 unsigned long error = -EBADF;
127 struct file * file = NULL;
129 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
130 if (!(flags & MAP_ANONYMOUS)) {
136 down_write(¤t->mm->mmap_sem);
137 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
138 up_write(¤t->mm->mmap_sem);
/*
 * old_mmap: legacy mmap(2) taking a byte offset.  Rejects offsets that
 * are not page aligned, then forwards to do_mmap2() with the offset
 * converted to pages.  (Error setup, label and return lines are not in
 * this excerpt.)
 */
146 asmlinkage unsigned long
147 old_mmap(unsigned long addr, unsigned long len, int prot,
148 int flags, int fd, off_t offset)
150 unsigned long result;
153 if (offset & ~PAGE_MASK)
156 result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
/*
 * sys_mmap2: mmap with the offset already expressed in pages — a thin
 * wrapper around the shared do_mmap2() helper.
 */
162 asmlinkage unsigned long
163 sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
164 unsigned long flags, unsigned long fd, unsigned long pgoff)
166 return do_mmap2(addr, len, prot, flags, fd, pgoff);
169 save_static_function(sys_fork);
170 __attribute_used__ noinline static int
171 _sys_fork(nabi_no_regargs struct pt_regs regs)
173 return do_fork(SIGCHLD, regs.regs[29], ®s, 0, NULL, NULL);
176 save_static_function(sys_clone);
177 __attribute_used__ noinline static int
178 _sys_clone(nabi_no_regargs struct pt_regs regs)
180 unsigned long clone_flags;
182 int __user *parent_tidptr, *child_tidptr;
184 clone_flags = regs.regs[4];
185 newsp = regs.regs[5];
187 newsp = regs.regs[29];
188 parent_tidptr = (int __user *) regs.regs[6];
190 /* We need to fetch the fifth argument off the stack. */
192 if (clone_flags & (CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)) {
193 int __user *__user *usp = (int __user *__user *) regs.regs[29];
194 if (regs.regs[2] == __NR_syscall) {
195 if (get_user (child_tidptr, &usp[5]))
198 else if (get_user (child_tidptr, &usp[4]))
202 child_tidptr = (int __user *) regs.regs[8];
204 return do_fork(clone_flags, newsp, ®s, 0,
205 parent_tidptr, child_tidptr);
209 * sys_execve() executes a new program.
211 asmlinkage int sys_execve(nabi_no_regargs struct pt_regs regs)
216 filename = getname((char __user *) (long)regs.regs[4]);
217 error = PTR_ERR(filename);
218 if (IS_ERR(filename))
220 error = do_execve(filename, (char __user *__user *) (long)regs.regs[5],
221 (char __user *__user *) (long)regs.regs[6], ®s);
229 * Compacrapability ...
/*
 * sys_uname: old-style uname(2); copies the (VServer-virtualised)
 * utsname to user space.  Failure path not visible in this excerpt —
 * presumably returns -EFAULT.
 */
231 asmlinkage int sys_uname(struct old_utsname __user * name)
233 if (name && !copy_to_user(name, vx_new_utsname(), sizeof (*name)))
239 * Compacrapability ...
241 asmlinkage int sys_olduname(struct oldold_utsname __user * name)
244 struct new_utsname *ptr;
248 if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
251 ptr = vx_new_utsname();
252 error = __copy_to_user(&name->sysname,ptr->sysname,__OLD_UTS_LEN);
253 error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
254 error -= __copy_to_user(&name->nodename,ptr->nodename,__OLD_UTS_LEN);
255 error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
256 error -= __copy_to_user(&name->release,ptr->release,__OLD_UTS_LEN);
257 error -= __put_user(0,name->release+__OLD_UTS_LEN);
258 error -= __copy_to_user(&name->version,ptr->version,__OLD_UTS_LEN);
259 error -= __put_user(0,name->version+__OLD_UTS_LEN);
260 error -= __copy_to_user(&name->machine,ptr->machine,__OLD_UTS_LEN);
261 error = __put_user(0,name->machine+__OLD_UTS_LEN);
262 error = error ? -EFAULT : 0;
/*
 * sys_set_thread_area: record the user-space TLS pointer for this
 * thread.  The actual store into thread_info (tp_value) is missing
 * from this excerpt — confirm against the full source.
 */
267 void sys_set_thread_area(unsigned long addr)
269 struct thread_info *ti = task_thread_info(current);
273 /* If some future MIPS implementation has this register in hardware,
274 * we will need to update it here (and in context switches). */
/*
 * _sys_sysmips: MIPS-specific sysmips(2) multiplexer.  Visible pieces:
 * a hostname-set path (requires CAP_SYS_ADMIN, bounded copy from user
 * space, update under uts_sem), the MIPS_ATOMIC_SET case (apparently
 * handled earlier in the syscall path — reaching it here is a bug,
 * hence the printk), and an update of the low two bits of
 * thread.mflags.  Most case labels, returns and braces are missing
 * from this excerpt; code kept verbatim.
 */
277 asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
284 char nodename[__NEW_UTS_LEN + 1];
286 if (!capable(CAP_SYS_ADMIN))
289 name = (char __user *) arg1;
291 len = strncpy_from_user(nodename, name, __NEW_UTS_LEN);
295 down_write(&uts_sem);
/* NOTE(review): this strncpy looks redundant — the strlcpy below
 * rewrites the same destination with a NUL-terminated copy; likely a
 * patch-merge artifact, verify against the full source. */
296 strncpy(vx_new_uts(nodename), nodename, len);
297 nodename[__NEW_UTS_LEN] = '\0';
298 strlcpy(vx_new_uts(nodename), nodename,
299 sizeof(vx_new_uts(nodename)));
304 case MIPS_ATOMIC_SET:
305 printk(KERN_CRIT "How did I get here?\n");
/* Replace the low two bits of mflags with bits from arg1. */
309 tmp = current->thread.mflags & ~3;
310 current->thread.mflags = tmp | (arg1 & 3);
325 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
327 * This is really horribly ugly.
/*
 * Dispatches on 'call' to the sem/msg/shm syscalls; the top 16 bits of
 * 'call' carry a version number consulted on the SHMAT path.
 * NOTE(review): the switch statement, most case labels and several
 * argument lines are missing from this excerpt; lines kept verbatim.
 */
329 asmlinkage int sys_ipc (uint call, int first, int second,
330 unsigned long third, void __user *ptr, long fifth)
334 version = call >> 16; /* hack for backward compatibility */
/* SEMOP: presumably semtimedop without a timeout (timeout arg line
 * not visible here). */
339 return sys_semtimedop (first, (struct sembuf __user *)ptr,
/* SEMTIMEDOP: fifth carries the user timespec pointer. */
342 return sys_semtimedop (first, (struct sembuf __user *)ptr,
344 (const struct timespec __user *)fifth);
346 return sys_semget (first, second, third);
/* SEMCTL: the union semun value is fetched from user memory. */
351 if (get_user(fourth.__pad, (void *__user *) ptr))
353 return sys_semctl (first, second, third, fourth);
357 return sys_msgsnd (first, (struct msgbuf __user *) ptr,
/* Old MSGRCV ABI passes msgp/msgtyp through an ipc_kludge struct. */
362 struct ipc_kludge tmp;
366 if (copy_from_user(&tmp,
367 (struct ipc_kludge __user *) ptr,
370 return sys_msgrcv (first, tmp.msgp, second,
374 return sys_msgrcv (first,
375 (struct msgbuf __user *) ptr,
376 second, fifth, third);
379 return sys_msgget ((key_t) first, second);
381 return sys_msgctl (first, second,
382 (struct msqid_ds __user *) ptr);
/* SHMAT: the attach address is written back through 'third'. */
388 ret = do_shmat (first, (char __user *) ptr, second,
392 return put_user (raddr, (ulong __user *) third);
394 case 1: /* iBCS2 emulator entry point */
395 if (!segment_eq(get_fs(), get_ds()))
397 return do_shmat (first, (char __user *) ptr, second,
401 return sys_shmdt ((char __user *)ptr);
403 return sys_shmget (first, second, third);
405 return sys_shmctl (first, second,
406 (struct shmid_ds __user *) ptr);
413 * Not implemented yet ...
/*
 * sys_cachectl: cache-control syscall stub (body not visible in this
 * excerpt; presumably returns an error such as -ENOSYS).
 */
415 asmlinkage int sys_cachectl(char *addr, int nbytes, int op)
421 * If we ever come here the user sp is bad. Zap the process right away.
422 * Due to the bad stack signaling wouldn't work.
/* NOTE(review): body not visible in this excerpt — presumably kills
 * the process (e.g. do_exit with SIGSEGV); confirm in full source. */
424 asmlinkage void bad_stack(void)