1 /* $Id: sys_sparc.c,v 1.57 2002/02/09 19:49:30 davem Exp $
2 * linux/arch/sparc64/kernel/sys_sparc.c
4 * This file contains various random system calls that
5 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */
9 #include <linux/config.h>
10 #include <linux/errno.h>
11 #include <linux/types.h>
12 #include <linux/sched.h>
14 #include <linux/file.h>
16 #include <linux/sem.h>
17 #include <linux/msg.h>
18 #include <linux/shm.h>
19 #include <linux/stat.h>
20 #include <linux/mman.h>
21 #include <linux/utsname.h>
22 #include <linux/smp.h>
23 #include <linux/smp_lock.h>
24 #include <linux/slab.h>
25 #include <linux/syscalls.h>
26 #include <linux/ipc.h>
27 #include <linux/personality.h>
29 #include <asm/uaccess.h>
31 #include <asm/utrap.h>
32 #include <asm/perfctr.h>
34 /* #define DEBUG_UNIMP_SYSCALL */
36 /* XXX Make this per-binary type, this way we can detect the type of
37 * XXX a binary. Every Sparc executable calls this very early on.
 */
/* Report the kernel page size to userspace.
 * NOTE(review): the body is elided here; presumably it just returns
 * PAGE_SIZE — confirm. */
39 asmlinkage unsigned long sys_getpagesize(void)
/* Round ADDR up to the next SHMLBA boundary, then add PGOFF's
 * sub-SHMLBA "colour" so that the mapping's virtual colour matches its
 * file offset.  Used below to keep shared mappings at addresses that
 * satisfy the D-cache aliasing constraint. */
44 #define COLOUR_ALIGN(addr,pgoff) \
45 ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
46 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
/* Pick an unmapped virtual address range of LEN bytes for a new mapping.
 *
 * File-backed and shared mappings are colour-aligned via COLOUR_ALIGN so
 * they respect SHMLBA cache aliasing; 32-bit tasks are restricted to a
 * 0xf0000000 task size.  With MAP_FIXED the caller's address is only
 * validated (shared-mapping colour check), never moved. */
48 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
50 struct mm_struct *mm = current->mm;
51 struct vm_area_struct * vma;
52 unsigned long task_size = TASK_SIZE;
53 unsigned long start_addr;
56 if (flags & MAP_FIXED) {
57 /* We do not accept a shared mapping if it would violate
58 * cache aliasing constraints.
60 if ((flags & MAP_SHARED) &&
61 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
/* 32-bit tasks may only map below 0xf0000000. */
66 if (test_thread_flag(TIF_32BIT))
67 task_size = 0xf0000000UL;
68 if (len > task_size || len > -PAGE_OFFSET)
/* Only mappings that can alias in the caches need colouring. */
72 if (filp || (flags & MAP_SHARED))
77 addr = COLOUR_ALIGN(addr, pgoff);
79 addr = PAGE_ALIGN(addr);
/* If the caller's hint fits within the task size and is free, use it. */
81 vma = find_vma(mm, addr);
82 if (task_size - len >= addr &&
83 (!vma || addr + len <= vma->vm_start))
/* Otherwise linear-search the VMA list from the cached hint. */
87 start_addr = addr = mm->free_area_cache;
93 addr = COLOUR_ALIGN(addr, pgoff);
95 addr = PAGE_ALIGN(addr);
97 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
98 /* At this point: (!vma || addr < vma->vm_end). */
/* NOTE(review): the range between PAGE_OFFSET and -PAGE_OFFSET
 * appears to be the unusable sparc64 VA hole; the search jumps
 * over it here — confirm. */
99 if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) {
101 vma = find_vma(mm, PAGE_OFFSET);
/* Ran past the task size: restart once from TASK_UNMAPPED_BASE. */
103 if (task_size < addr) {
104 if (start_addr != TASK_UNMAPPED_BASE) {
105 start_addr = addr = TASK_UNMAPPED_BASE;
110 if (!vma || addr + len <= vma->vm_start) {
112 * Remember the place where we stopped the search:
114 mm->free_area_cache = addr + len;
119 addr = COLOUR_ALIGN(addr, pgoff);
123 /* Try to align mapping such that we align it as much as possible. */
/* get_fb_unmapped_area() - get_unmapped_area variant for framebuffer-style
 * mappings that benefit from large alignment.
 *
 * Over-allocates the search by (align_goal - PAGE_SIZE) and rounds the
 * result up, trying 4MB, then 512K, then 64K alignment depending on LEN,
 * and stepping down on failure; falls back to a plain search if none of
 * the alignment goals can be satisfied.
 *
 * Fix: the MAP_FIXED fast path passed `addr`, which is initialised to
 * -ENOMEM, instead of the caller's requested `orig_addr`; later upstream
 * versions of this function pass the requested address here. */
124 unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
126 unsigned long align_goal, addr = -ENOMEM;
128 if (flags & MAP_FIXED) {
129 /* Ok, don't mess with it. */
130 return get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
/* Alignment is handled here, not by the generic shared-mapping path. */
132 flags &= ~MAP_SHARED;
134 align_goal = PAGE_SIZE;
135 if (len >= (4UL * 1024 * 1024))
136 align_goal = (4UL * 1024 * 1024);
137 else if (len >= (512UL * 1024))
138 align_goal = (512UL * 1024);
139 else if (len >= (64UL * 1024))
140 align_goal = (64UL * 1024);
/* Over-allocate, then round the start up to the alignment goal. */
143 addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
144 if (!(addr & ~PAGE_MASK)) {
145 addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
/* Search failed: step down to the next smaller alignment goal. */
149 if (align_goal == (4UL * 1024 * 1024))
150 align_goal = (512UL * 1024);
151 else if (align_goal == (512UL * 1024))
152 align_goal = (64UL * 1024);
154 align_goal = PAGE_SIZE;
155 } while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);
157 /* Mapping is smaller than 64K or larger areas could not
160 if (addr & ~PAGE_MASK)
161 addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags);
/* sparc_brk() - brk entry point with sparc64 sanity checks.
 * Invalid requests (a 32-bit task abusing the 64-bit "ta 0x6d" trap, or
 * a break that would cross the PAGE_OFFSET boundary) silently return the
 * current break instead of failing. */
166 asmlinkage unsigned long sparc_brk(unsigned long brk)
168 /* People could try to be nasty and use ta 0x6d in 32bit programs */
169 if (test_thread_flag(TIF_32BIT) &&
171 return current->mm->brk;
/* Refuse to move the break across the PAGE_OFFSET boundary. */
173 if ((current->mm->brk & PAGE_OFFSET) != (brk & PAGE_OFFSET))
174 return current->mm->brk;
179 * sys_pipe() is the normal C calling standard for creating
180 * a pipe. It's not the way unix traditionally does this, though.
182 asmlinkage int sparc_pipe(struct pt_regs *regs)
/* Second descriptor is returned in the %i1 register slot; the first is
 * presumably the syscall return value, per sparc convention — TODO
 * confirm (the do_pipe call is not visible here). */
190 regs->u_regs[UREG_I1] = fd[1];
197 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
199 * This is really horribly ugly.
/* Dispatches on `call` to the semaphore, message-queue and shared-memory
 * syscalls.  The *CTL variants pass IPC_64 so userspace always sees the
 * 64-bit ipc structure layouts. */
202 asmlinkage int sys_ipc (unsigned call, int first, int second, unsigned long third, void *ptr, long fifth)
206 /* No need for backward compatibility. We can start fresh... */
/* SEMOP is semtimedop with no timeout. */
211 err = sys_semtimedop (first, (struct sembuf *)ptr, second, NULL);
214 err = sys_semtimedop (first, (struct sembuf *)ptr, second, (const struct timespec *) fifth);
217 err = sys_semget (first, second, (int)third);
/* SEMCTL: the semun argument is passed indirectly through ptr. */
225 if(get_user(fourth.__pad, (void **)ptr))
227 err = sys_semctl (first, second | IPC_64, (int)third, fourth);
237 err = sys_msgsnd (first, (struct msgbuf *) ptr,
241 err = sys_msgrcv (first, (struct msgbuf *) ptr, second, fifth, (int)third);
244 err = sys_msgget ((key_t) first, second);
247 err = sys_msgctl (first, second | IPC_64, (struct msqid_ds *) ptr);
/* SHMAT: attach address is written back through `third`. */
257 err = do_shmat (first, (char *) ptr, second, &raddr);
259 if (put_user(raddr, (ulong __user *) third))
265 err = sys_shmdt ((char *)ptr);
268 err = sys_shmget (first, second, (int)third);
271 err = sys_shmctl (first, second | IPC_64, (struct shmid_ds *) ptr);
/* uname(2) wrapper: PER_LINUX32 tasks see "sparc" as the machine name
 * instead of the native 64-bit string. */
283 asmlinkage int sparc64_newuname(struct new_utsname __user *name)
285 int ret = sys_newuname(name);
287 if (current->personality == PER_LINUX32 && !ret) {
288 ret = copy_to_user(name->machine, "sparc\0\0", 8) ? -EFAULT : 0;
/* personality(2) wrapper: a PER_LINUX32 task asking for PER_LINUX keeps
 * PER_LINUX32, so compat tasks cannot accidentally promote themselves to
 * the 64-bit personality. */
293 asmlinkage int sparc64_personality(unsigned long personality)
297 if (current->personality == PER_LINUX32 && personality == PER_LINUX)
298 personality = PER_LINUX32;
299 ret = sys_personality(personality);
300 if (ret == PER_LINUX32)
306 /* Linux version of mmap */
/* sys_mmap() - sparc64 mmap entry point.
 * Rejects lengths/addresses outside the task's VA window (below
 * 0xf0000000 for 32-bit tasks, outside the VA hole otherwise), then
 * hands off to do_mmap() under the mmap semaphore.
 *
 * Fix: "&current" had been mangled into the single character U+00A4
 * ("¤t") by a broken HTML-entity round-trip; restored the takes of
 * current->mm->mmap_sem. */
307 asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
308 unsigned long prot, unsigned long flags, unsigned long fd,
311 struct file * file = NULL;
312 unsigned long retval = -EBADF;
314 if (!(flags & MAP_ANONYMOUS)) {
319 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
320 len = PAGE_ALIGN(len);
/* 32-bit tasks: whole mapping must fit below 0xf0000000. */
323 if (test_thread_flag(TIF_32BIT)) {
324 if (len > 0xf0000000UL ||
325 ((flags & MAP_FIXED) && addr > 0xf0000000UL - len))
/* 64-bit tasks: mapping must not intrude into the VA hole. */
328 if (len > -PAGE_OFFSET ||
329 ((flags & MAP_FIXED) &&
330 addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
334 down_write(&current->mm->mmap_sem);
335 retval = do_mmap(file, addr, len, prot, flags, off);
336 up_write(&current->mm->mmap_sem);
/* sys64_munmap() - munmap with sparc64 VA-hole validation before the
 * generic do_munmap() under the mmap semaphore.
 * Fix: "&current" had been mangled into U+00A4 ("¤t") by a broken
 * HTML-entity round-trip; restored. */
345 asmlinkage long sys64_munmap(unsigned long addr, size_t len)
349 if (len > -PAGE_OFFSET ||
350 (addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
352 down_write(&current->mm->mmap_sem);
353 ret = do_munmap(current->mm, addr, len);
354 up_write(&current->mm->mmap_sem);
/* Generic mremap implementation; presumably lives in mm/mremap.c —
 * TODO confirm, and consider moving this extern into a shared header. */
358 extern unsigned long do_mremap(unsigned long addr,
359 unsigned long old_len, unsigned long new_len,
360 unsigned long flags, unsigned long new_addr);
/* sys64_mremap() - mremap with sparc64 VA-hole validation.
 * Old and new ranges must avoid the hole around PAGE_OFFSET; if growing
 * in place would cross into it and MREMAP_MAYMOVE is set, a fresh target
 * is picked with get_unmapped_area() and the request is converted to
 * MREMAP_FIXED before calling do_mremap().
 *
 * Fix: "&current" had been mangled into U+00A4 ("¤t") by a broken
 * HTML-entity round-trip in the mmap_sem down_write/up_write pair;
 * restored. */
362 asmlinkage unsigned long sys64_mremap(unsigned long addr,
363 unsigned long old_len, unsigned long new_len,
364 unsigned long flags, unsigned long new_addr)
366 struct vm_area_struct *vma;
367 unsigned long ret = -EINVAL;
/* 32-bit tasks do not use this entry point. */
368 if (test_thread_flag(TIF_32BIT))
370 if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET)
372 if (addr < PAGE_OFFSET && addr + old_len > -PAGE_OFFSET)
374 down_write(&current->mm->mmap_sem);
375 if (flags & MREMAP_FIXED) {
376 if (new_addr < PAGE_OFFSET &&
377 new_addr + new_len > -PAGE_OFFSET)
379 } else if (addr < PAGE_OFFSET && addr + new_len > -PAGE_OFFSET) {
380 unsigned long map_flags = 0;
381 struct file *file = NULL;
/* Growing in place would enter the VA hole; we must be allowed
 * to move the mapping. */
384 if (!(flags & MREMAP_MAYMOVE))
387 vma = find_vma(current->mm, addr);
389 if (vma->vm_flags & VM_SHARED)
390 map_flags |= MAP_SHARED;
394 /* MREMAP_FIXED checked above. */
395 new_addr = get_unmapped_area(file, addr, new_len,
396 vma ? vma->vm_pgoff : 0,
399 if (new_addr & ~PAGE_MASK)
/* Found a safe target: force do_mremap to use it. */
401 flags |= MREMAP_FIXED;
403 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
405 up_write(&current->mm->mmap_sem);
410 /* we come to here via sys_nis_syscall so it can setup the regs argument */
/* Logs unimplemented SPARC syscalls; the syscall number is in
 * u_regs[1] (%g1).  The "don't make the system unusable" comment
 * suggests the printk is rate-limited in the elided code — TODO
 * confirm. */
411 asmlinkage unsigned long
412 c_sys_nis_syscall (struct pt_regs *regs)
416 /* Don't make the system unusable, if someone goes stuck */
420 printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
421 #ifdef DEBUG_UNIMP_SYSCALL
428 /* #define DEBUG_SPARC_BREAKPOINT */
/* Breakpoint trap handler: delivers SIGTRAP/TRAP_BRKPT to the current
 * task with si_addr set to the trapping PC.  PC/nPC are masked to 32
 * bits first for compat tasks. */
431 sparc_breakpoint (struct pt_regs *regs)
435 if (test_thread_flag(TIF_32BIT)) {
436 regs->tpc &= 0xffffffff;
437 regs->tnpc &= 0xffffffff;
439 #ifdef DEBUG_SPARC_BREAKPOINT
440 printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
442 info.si_signo = SIGTRAP;
444 info.si_code = TRAP_BRKPT;
445 info.si_addr = (void *)regs->tpc;
447 force_sig_info(SIGTRAP, &info, current);
448 #ifdef DEBUG_SPARC_BREAKPOINT
449 printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
453 extern void check_pending(int signum);
/* Copy the NIS domain name to userspace.  vx_new_uts() indicates a
 * Linux-VServer-patched utsname — presumably per-context; confirm.
 * NOTE(review): the copy uses `len` rather than min(len, nlen); the
 * elided lines between the strlen and the bound check may clamp it —
 * verify against the full source. */
455 asmlinkage int sys_getdomainname(char __user *name, int len)
462 nlen = strlen(vx_new_uts(domainname)) + 1;
466 if (len > __NEW_UTS_LEN)
468 if (copy_to_user(name, vx_new_uts(domainname), len))
/* Stub used when no Solaris emulation module is loaded: skips the trap
 * instruction (PC <- nPC), prints a hint once, and kills the task with
 * SIGSEGV. */
476 asmlinkage int solaris_syscall(struct pt_regs *regs)
480 regs->tpc = regs->tnpc;
482 if (test_thread_flag(TIF_32BIT)) {
483 regs->tpc &= 0xffffffff;
484 regs->tnpc &= 0xffffffff;
487 printk ("For Solaris binary emulation you need solaris module loaded\n");
490 send_sig(SIGSEGV, current, 1);
495 #ifndef CONFIG_SUNOS_EMUL
/* Stub compiled when SunOS emulation is configured out: mirrors
 * solaris_syscall() above but uses force_sig(), so the SIGSEGV cannot
 * be blocked. */
496 asmlinkage int sunos_syscall(struct pt_regs *regs)
500 regs->tpc = regs->tnpc;
502 if (test_thread_flag(TIF_32BIT)) {
503 regs->tpc &= 0xffffffff;
504 regs->tnpc &= 0xffffffff;
507 printk ("SunOS binary emulation not compiled in\n");
508 force_sig(SIGSEGV, current);
/* Install or query a per-thread user trap handler.
 *
 * The handler table lives at current_thread_info()->utraps and is
 * allocated lazily; slot 0 is used as a reference count (set to 1 on
 * allocation, checked for > 1 before writes), with the table copied
 * before modification when it is shared — presumably across clone();
 * confirm.  Passing UTH_NOCHANGE as the new handler makes this a pure
 * query: the old precise handler is written to *old_p without
 * modifying the table. */
514 asmlinkage int sys_utrap_install(utrap_entry_t type,
515 utrap_handler_t new_p,
516 utrap_handler_t new_d,
517 utrap_handler_t __user *old_p,
518 utrap_handler_t __user *old_d)
520 if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
/* Query-only path: report the current handler (NULL if no table). */
522 if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
524 if (!current_thread_info()->utraps) {
525 if (put_user(NULL, old_p))
528 if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
533 if (put_user(NULL, old_d))
/* Lazy first allocation of the handler table. */
538 if (!current_thread_info()->utraps) {
539 current_thread_info()->utraps =
540 kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
541 if (!current_thread_info()->utraps)
543 current_thread_info()->utraps[0] = 1;
544 memset(current_thread_info()->utraps+1, 0,
545 UT_TRAP_INSTRUCTION_31*sizeof(long));
/* Table is shared (refcount > 1) and we are changing it: copy on
 * write so other users keep the old handlers. */
547 if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
548 current_thread_info()->utraps[0] > 1) {
549 long *p = current_thread_info()->utraps;
551 current_thread_info()->utraps =
552 kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
/* Allocation failed: restore the shared table and bail. */
554 if (!current_thread_info()->utraps) {
555 current_thread_info()->utraps = p;
559 current_thread_info()->utraps[0] = 1;
560 memcpy(current_thread_info()->utraps+1, p+1,
561 UT_TRAP_INSTRUCTION_31*sizeof(long));
/* Report the previous handler, then install the new one. */
565 if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
569 if (put_user(NULL, old_d))
572 current_thread_info()->utraps[type] = (long)new_p;
/* Set the trap-state memory model field (TSTATE.MM, shifted into place
 * at bit 14) to the requested model.  Validation of `model` presumably
 * happens in the elided lines — TODO confirm. */
577 long sparc_memory_ordering(unsigned long model, struct pt_regs *regs)
581 regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
/* sparc64 rt_sigaction: like the generic one, but also records the
 * userspace `restorer` trampoline in new_ka.ka_restorer.  Rejects any
 * sigsetsize other than the native sigset_t size. */
586 sys_rt_sigaction(int sig,
587 const struct sigaction __user *act,
588 struct sigaction __user *oact,
589 void __user *restorer,
592 struct k_sigaction new_ka, old_ka;
595 /* XXX: Don't preclude handling different sized sigset_t's. */
596 if (sigsetsize != sizeof(sigset_t))
600 new_ka.ka_restorer = restorer;
601 if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
605 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
/* Write back the previous action if the caller asked for it. */
608 if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
615 /* Invoked by rtrap code to update performance counters in
/* Folds the %pic register halves into the per-thread kernel counters
 * (low 32 bits -> cntd0, high 32 bits -> cntd1) and mirrors the totals
 * to the user-visible counter locations.  __put_user() failures are
 * deliberately ignored here, unlike the PERFCTR_READ path in
 * sys_perfctr(). */
619 update_perfctrs(void)
621 unsigned long pic, tmp;
624 tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
625 __put_user(tmp, current_thread_info()->user_cntd0);
626 tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
627 __put_user(tmp, current_thread_info()->user_cntd1);
/* Multiplexed performance-counter control syscall.  Per-thread counter
 * state lives in thread_info (pcr_reg, user_cntd0/1, kernel_cntd0/1)
 * and is gated by TIF_PERFCTR; every mutating opcode except the enable
 * path requires the flag to already be set. */
632 sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long arg2)
/* Enable: record the PCR value and the user-visible counter
 * addresses, zero the accumulators, and flag the thread. */
638 current_thread_info()->pcr_reg = arg2;
639 current_thread_info()->user_cntd0 = (u64 *) arg0;
640 current_thread_info()->user_cntd1 = (u64 *) arg1;
641 current_thread_info()->kernel_cntd0 =
642 current_thread_info()->kernel_cntd1 = 0;
645 set_thread_flag(TIF_PERFCTR);
/* Disable: tear the per-thread state down again. */
650 if (test_thread_flag(TIF_PERFCTR)) {
651 current_thread_info()->user_cntd0 =
652 current_thread_info()->user_cntd1 = NULL;
653 current_thread_info()->pcr_reg = 0;
655 clear_thread_flag(TIF_PERFCTR);
/* Read: accumulate %pic into the kernel counters and publish the
 * totals to userspace, collecting __put_user errors. */
661 unsigned long pic, tmp;
663 if (!test_thread_flag(TIF_PERFCTR)) {
668 tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
669 err |= __put_user(tmp, current_thread_info()->user_cntd0);
670 tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
671 err |= __put_user(tmp, current_thread_info()->user_cntd1);
/* Clear: reset the kernel accumulators. */
677 if (!test_thread_flag(TIF_PERFCTR)) {
681 current_thread_info()->kernel_cntd0 =
682 current_thread_info()->kernel_cntd1 = 0;
/* SETPCR: load a new PCR value from userspace, write it to the
 * hardware register, and restart the accumulators. */
686 case PERFCTR_SETPCR: {
687 u64 *user_pcr = (u64 *)arg0;
688 if (!test_thread_flag(TIF_PERFCTR)) {
692 err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
693 write_pcr(current_thread_info()->pcr_reg);
694 current_thread_info()->kernel_cntd0 =
695 current_thread_info()->kernel_cntd1 = 0;
/* GETPCR: report the cached PCR value to userspace. */
700 case PERFCTR_GETPCR: {
701 u64 *user_pcr = (u64 *)arg0;
702 if (!test_thread_flag(TIF_PERFCTR)) {
706 err |= __put_user(current_thread_info()->pcr_reg, user_pcr);