/* $Id: sys_sparc.c,v 1.70 2001/04/14 01:12:02 davem Exp $
 * linux/arch/sparc/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */
9 #include <linux/errno.h>
10 #include <linux/types.h>
11 #include <linux/sched.h>
14 #include <linux/file.h>
15 #include <linux/sem.h>
16 #include <linux/msg.h>
17 #include <linux/shm.h>
18 #include <linux/stat.h>
19 #include <linux/syscalls.h>
20 #include <linux/mman.h>
21 #include <linux/utsname.h>
22 #include <linux/smp.h>
23 #include <linux/smp_lock.h>
24 #include <linux/vs_cvirt.h>
26 #include <asm/uaccess.h>
29 /* #define DEBUG_UNIMP_SYSCALL */
/* XXX Make this per-binary type, this way we can detect the type of
 * XXX a binary.  Every Sparc executable calls this very early on.
 */
34 asmlinkage unsigned long sys_getpagesize(void)
36 return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */
39 #define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1))
41 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
43 struct vm_area_struct * vmm;
45 if (flags & MAP_FIXED) {
46 /* We do not accept a shared mapping if it would violate
47 * cache aliasing constraints.
49 if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
54 /* See asm-sparc/uaccess.h */
55 if (len > TASK_SIZE - PAGE_SIZE)
57 if (ARCH_SUN4C_SUN4 && len > 0x20000000)
60 addr = TASK_UNMAPPED_BASE;
62 if (flags & MAP_SHARED)
63 addr = COLOUR_ALIGN(addr);
65 addr = PAGE_ALIGN(addr);
67 for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
68 /* At this point: (!vmm || addr < vmm->vm_end). */
69 if (ARCH_SUN4C_SUN4 && addr < 0xe0000000 && 0x20000000 - len < addr) {
71 vmm = find_vma(current->mm, PAGE_OFFSET);
73 if (TASK_SIZE - PAGE_SIZE - len < addr)
75 if (!vmm || addr + len <= vmm->vm_start)
78 if (flags & MAP_SHARED)
79 addr = COLOUR_ALIGN(addr);
83 asmlinkage unsigned long sparc_brk(unsigned long brk)
86 if ((brk & 0xe0000000) != (current->mm->brk & 0xe0000000))
87 return current->mm->brk;
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
96 asmlinkage int sparc_pipe(struct pt_regs *regs)
104 regs->u_regs[UREG_I1] = fd[1];
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
/* sys_ipc(): the single-entry de-multiplexer for the SysV IPC calls
 * (semaphores, message queues, shared memory).
 *
 * NOTE(review): this copy is a sparse excerpt -- the switch headers,
 * case labels, braces and several statements between the surviving
 * lines are missing, so the fragment is not compilable as shown.
 * Comments below annotate only what the visible lines themselves do.
 */
116 asmlinkage int sys_ipc (uint call, int first, int second, int third, void __user *ptr, long fifth)
/* High 16 bits of 'call' carry an ABI version number; low bits select
 * the operation (SEMOP/SEMGET/.../SHMCTL).
 */
120 version = call >> 16; /* hack for backward compatibility */
/* SEMOP: plain semop is semtimedop with a NULL timeout. */
126 err = sys_semtimedop (first, (struct sembuf __user *)ptr, second, NULL);
/* SEMTIMEDOP: 'fifth' smuggles the userspace timespec pointer. */
129 err = sys_semtimedop (first, (struct sembuf __user *)ptr, second, (const struct timespec __user *) fifth);
132 err = sys_semget (first, second, third);
/* SEMCTL: fetch the union semun argument (a pointer-sized pad) from
 * userspace before dispatching.
 */
140 if (get_user(fourth.__pad,
141 (void __user * __user *)ptr))
143 err = sys_semctl (first, second, third, fourth);
153 err = sys_msgsnd (first, (struct msgbuf __user *) ptr,
/* MSGRCV version 0: old ABI packs msgp and msgtyp in an ipc_kludge
 * struct that must be copied in first.
 */
159 struct ipc_kludge tmp;
164 if (copy_from_user(&tmp, (struct ipc_kludge __user *) ptr, sizeof (tmp)))
166 err = sys_msgrcv (first, tmp.msgp, second, tmp.msgtyp, third);
/* MSGRCV version 1: new ABI passes msgbuf and msgtyp directly. */
170 err = sys_msgrcv (first,
171 (struct msgbuf __user *) ptr,
172 second, fifth, third);
176 err = sys_msgget ((key_t) first, second);
179 err = sys_msgctl (first, second, (struct msqid_ds __user *) ptr);
/* SHMAT: attach, then write the resulting address back through the
 * user pointer passed in 'third'.
 */
191 err = do_shmat (first, (char __user *) ptr, second, &raddr);
195 if (put_user (raddr, (ulong __user *) third))
200 case 1: /* iBCS2 emulator entry point */
201 err = do_shmat (first, (char __user *) ptr,
202 second, (ulong *) third);
206 err = sys_shmdt ((char __user *)ptr);
209 err = sys_shmget (first, second, third);
212 err = sys_shmctl (first, second, (struct shmid_ds __user *) ptr);
224 /* Linux version of mmap */
225 static unsigned long do_mmap2(unsigned long addr, unsigned long len,
226 unsigned long prot, unsigned long flags, unsigned long fd,
229 struct file * file = NULL;
230 unsigned long retval = -EBADF;
232 if (!(flags & MAP_ANONYMOUS)) {
239 len = PAGE_ALIGN(len);
240 if (ARCH_SUN4C_SUN4 &&
242 ((flags & MAP_FIXED) &&
243 addr < 0xe0000000 && addr + len > 0x20000000)))
246 /* See asm-sparc/uaccess.h */
247 if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
250 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
252 down_write(¤t->mm->mmap_sem);
253 retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
254 up_write(¤t->mm->mmap_sem);
263 asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
264 unsigned long prot, unsigned long flags, unsigned long fd,
267 /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
269 return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12));
272 asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
273 unsigned long prot, unsigned long flags, unsigned long fd,
276 return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
279 long sparc_remap_file_pages(unsigned long start, unsigned long size,
280 unsigned long prot, unsigned long pgoff,
283 /* This works on an existing mmap so we don't need to validate
284 * the range as that was done at the original mmap call.
286 return sys_remap_file_pages(start, size, prot,
287 (pgoff >> (PAGE_SHIFT - 12)), flags);
290 extern unsigned long do_mremap(unsigned long addr,
291 unsigned long old_len, unsigned long new_len,
292 unsigned long flags, unsigned long new_addr);
294 asmlinkage unsigned long sparc_mremap(unsigned long addr,
295 unsigned long old_len, unsigned long new_len,
296 unsigned long flags, unsigned long new_addr)
298 struct vm_area_struct *vma;
299 unsigned long ret = -EINVAL;
300 if (ARCH_SUN4C_SUN4) {
301 if (old_len > 0x20000000 || new_len > 0x20000000)
303 if (addr < 0xe0000000 && addr + old_len > 0x20000000)
306 if (old_len > TASK_SIZE - PAGE_SIZE ||
307 new_len > TASK_SIZE - PAGE_SIZE)
309 down_write(¤t->mm->mmap_sem);
310 if (flags & MREMAP_FIXED) {
311 if (ARCH_SUN4C_SUN4 &&
312 new_addr < 0xe0000000 &&
313 new_addr + new_len > 0x20000000)
315 if (new_addr + new_len > TASK_SIZE - PAGE_SIZE)
317 } else if ((ARCH_SUN4C_SUN4 && addr < 0xe0000000 &&
318 addr + new_len > 0x20000000) ||
319 addr + new_len > TASK_SIZE - PAGE_SIZE) {
320 unsigned long map_flags = 0;
321 struct file *file = NULL;
324 if (!(flags & MREMAP_MAYMOVE))
327 vma = find_vma(current->mm, addr);
329 if (vma->vm_flags & VM_SHARED)
330 map_flags |= MAP_SHARED;
334 new_addr = get_unmapped_area(file, addr, new_len,
335 vma ? vma->vm_pgoff : 0,
338 if (new_addr & ~PAGE_MASK)
340 flags |= MREMAP_FIXED;
342 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
344 up_write(¤t->mm->mmap_sem);
349 /* we come to here via sys_nis_syscall so it can setup the regs argument */
350 asmlinkage unsigned long
351 c_sys_nis_syscall (struct pt_regs *regs)
353 static int count = 0;
357 printk ("%s[%d]: Unimplemented SPARC system call %d\n",
358 current->comm, current->pid, (int)regs->u_regs[1]);
359 #ifdef DEBUG_UNIMP_SYSCALL
365 /* #define DEBUG_SPARC_BREAKPOINT */
368 sparc_breakpoint (struct pt_regs *regs)
373 #ifdef DEBUG_SPARC_BREAKPOINT
374 printk ("TRAP: Entering kernel PC=%x, nPC=%x\n", regs->pc, regs->npc);
376 info.si_signo = SIGTRAP;
378 info.si_code = TRAP_BRKPT;
379 info.si_addr = (void __user *)regs->pc;
381 force_sig_info(SIGTRAP, &info, current);
383 #ifdef DEBUG_SPARC_BREAKPOINT
384 printk ("TRAP: Returning to space: PC=%x nPC=%x\n", regs->pc, regs->npc);
390 sparc_sigaction (int sig, const struct old_sigaction __user *act,
391 struct old_sigaction __user *oact)
393 struct k_sigaction new_ka, old_ka;
397 current->thread.new_signal = 1;
404 if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
405 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
406 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
408 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
409 __get_user(mask, &act->sa_mask);
410 siginitset(&new_ka.sa.sa_mask, mask);
411 new_ka.ka_restorer = NULL;
414 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
417 /* In the clone() case we could copy half consistent
418 * state to the user, however this could sleep and
419 * deadlock us if we held the signal lock on SMP. So for
420 * now I take the easy way out and do no locking.
422 if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
423 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
424 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
426 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
427 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
434 sys_rt_sigaction(int sig,
435 const struct sigaction __user *act,
436 struct sigaction __user *oact,
437 void __user *restorer,
440 struct k_sigaction new_ka, old_ka;
443 /* XXX: Don't preclude handling different sized sigset_t's. */
444 if (sigsetsize != sizeof(sigset_t))
447 /* All tasks which use RT signals (effectively) use
450 current->thread.new_signal = 1;
453 new_ka.ka_restorer = restorer;
454 if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
458 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
461 if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
/* sys_getdomainname - copy the (vserver-virtualised, via vx_new_uts)
 * NIS domain name out to userspace, at most @len bytes.
 *
 * NOTE(review): this block is truncated at the edge of the visible
 * excerpt -- the uts_sem locking, the nlen/len clamping between the
 * surviving lines, and the function tail are missing here; annotating
 * only, no code changed.
 */
468 asmlinkage int sys_getdomainname(char __user *name, int len)
/* Length of the stored domain name including its NUL terminator. */
475 nlen = strlen(vx_new_uts(domainname)) + 1;
/* Never copy more than the utsname field can hold. */
479 if (len > __NEW_UTS_LEN)
481 if (copy_to_user(name, vx_new_uts(domainname), len))