1 /* $Id: sys_sparc.c,v 1.57 2002/02/09 19:49:30 davem Exp $
2 * linux/arch/sparc64/kernel/sys_sparc.c
4 * This file contains various random system calls that
5 * have a non-standard calling sequence on the Linux/sparc
9 #include <linux/config.h>
10 #include <linux/errno.h>
11 #include <linux/types.h>
12 #include <linux/sched.h>
14 #include <linux/file.h>
16 #include <linux/sem.h>
17 #include <linux/msg.h>
18 #include <linux/shm.h>
19 #include <linux/stat.h>
20 #include <linux/mman.h>
21 #include <linux/utsname.h>
22 #include <linux/smp.h>
23 #include <linux/smp_lock.h>
24 #include <linux/slab.h>
25 #include <linux/syscalls.h>
26 #include <linux/ipc.h>
27 #include <linux/personality.h>
29 #include <asm/uaccess.h>
31 #include <asm/utrap.h>
32 #include <asm/perfctr.h>
34 /* #define DEBUG_UNIMP_SYSCALL */
36 /* XXX Make this per-binary type, this way we can detect the type of
37 * XXX a binary. Every Sparc executable calls this very early on.
/* Report the kernel page size to userspace; presumably returns PAGE_SIZE
 * (body elided in this extract — TODO confirm against the full source). */
39 asmlinkage unsigned long sys_getpagesize(void)
/* Round addr up to the next SHMLBA boundary, then add the cache-colour
 * offset implied by pgoff (its low bits below SHMLBA, in bytes), so that
 * mappings of the same file offset share a D-cache colour. */
44 #define COLOUR_ALIGN(addr,pgoff) \
45 ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
46 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
/* Pick an unmapped virtual range of 'len' bytes for a new mapping.
 * Shared (or file-backed) mappings are colour-aligned with COLOUR_ALIGN
 * to satisfy SPARC D-cache aliasing constraints; anonymous private
 * mappings only need page alignment.
 * NOTE(review): several interior lines are elided in this extract, so
 * the exact control flow between the hint check and the search loop
 * should be confirmed against the full source. */
48 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
50 struct mm_struct *mm = current->mm;
51 struct vm_area_struct * vma;
52 unsigned long task_size = TASK_SIZE;
53 unsigned long start_addr;
56 if (flags & MAP_FIXED) {
57 /* We do not accept a shared mapping if it would violate
58 * cache aliasing constraints.
/* MAP_FIXED + MAP_SHARED is rejected if the requested address does not
 * have the same colour (offset modulo SHMLBA) as the file offset. */
60 if ((flags & MAP_SHARED) &&
61 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
/* 32-bit tasks are confined below 0xf0000000. */
66 if (test_thread_flag(TIF_32BIT))
67 task_size = 0xf0000000UL;
/* -PAGE_OFFSET is the size of the positive VA hole boundary region;
 * lengths beyond it can never fit. */
68 if (len > task_size || len > -PAGE_OFFSET)
/* Colour alignment only matters for file or shared mappings. */
72 if (filp || (flags & MAP_SHARED))
77 addr = COLOUR_ALIGN(addr, pgoff);
79 addr = PAGE_ALIGN(addr);
/* First, try to honour the caller's address hint. */
81 vma = find_vma(mm, addr);
82 if (task_size - len >= addr &&
83 (!vma || addr + len <= vma->vm_start))
/* Otherwise search from the cached free-area hint. */
87 start_addr = addr = mm->free_area_cache;
93 addr = COLOUR_ALIGN(addr, pgoff);
95 addr = PAGE_ALIGN(addr);
97 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
98 /* At this point: (!vma || addr < vma->vm_end). */
/* Skip over the sparc64 VA hole: if the range would straddle it,
 * restart the walk above PAGE_OFFSET. */
99 if (addr < PAGE_OFFSET && -PAGE_OFFSET - len < addr) {
101 vma = find_vma(mm, PAGE_OFFSET);
103 if (task_size < addr) {
/* Exhausted the space above the cached hint: retry once from
 * TASK_UNMAPPED_BASE before giving up. */
104 if (start_addr != TASK_UNMAPPED_BASE) {
105 start_addr = addr = TASK_UNMAPPED_BASE;
110 if (!vma || addr + len <= vma->vm_start) {
112 * Remember the place where we stopped the search:
114 mm->free_area_cache = addr + len;
119 addr = COLOUR_ALIGN(addr, pgoff);
123 /* Try to align mapping such that we align it as much as possible. */
/* Framebuffer helper: over-allocate by (align_goal - PAGE_SIZE) so the
 * result can be rounded up to a large power-of-two boundary (4M, 512K,
 * 64K — the SPARC page-table-entry superpage sizes), retrying with
 * progressively smaller goals, and finally falling back to a plain
 * page-aligned search. */
124 unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
126 unsigned long align_goal, addr = -ENOMEM;
128 if (flags & MAP_FIXED) {
129 /* Ok, don't mess with it. */
130 return get_unmapped_area(NULL, addr, len, pgoff, flags, 0);
/* Colour alignment is irrelevant here; drop MAP_SHARED so the
 * generic search does not impose it. */
132 flags &= ~MAP_SHARED;
/* Choose the largest alignment goal the mapping size justifies. */
134 align_goal = PAGE_SIZE;
135 if (len >= (4UL * 1024 * 1024))
136 align_goal = (4UL * 1024 * 1024);
137 else if (len >= (512UL * 1024))
138 align_goal = (512UL * 1024);
139 else if (len >= (64UL * 1024))
140 align_goal = (64UL * 1024);
143 addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags, 0);
/* A page-aligned (non-error) result: round up inside the slack. */
144 if (!(addr & ~PAGE_MASK)) {
145 addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
/* On failure, step down to the next smaller alignment goal. */
149 if (align_goal == (4UL * 1024 * 1024))
150 align_goal = (512UL * 1024);
151 else if (align_goal == (512UL * 1024))
152 align_goal = (64UL * 1024);
154 align_goal = PAGE_SIZE;
155 } while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);
157 /* Mapping is smaller than 64K or larger areas could not
/* addr still holds an -errno here; retry without any extra slack. */
160 if (addr & ~PAGE_MASK)
161 addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags, 0);
/* sparc-flavoured brk(): validates the requested break before deferring
 * to the generic implementation (elided here). Rejects requests that
 * would cross the kernel PAGE_OFFSET boundary by returning the current
 * break unchanged. */
166 asmlinkage unsigned long sparc_brk(unsigned long brk)
168 /* People could try to be nasty and use ta 0x6d in 32bit programs */
169 if (test_thread_flag(TIF_32BIT) &&
171 return current->mm->brk;
/* New break must be on the same side of PAGE_OFFSET as the old one. */
173 if ((current->mm->brk & PAGE_OFFSET) != (brk & PAGE_OFFSET))
174 return current->mm->brk;
179 * sys_pipe() is the normal C calling standard for creating
180 * a pipe. It's not the way unix traditionally does this, though.
/* SPARC convention: the second pipe fd is returned in register %i1;
 * presumably the first fd is the syscall return value (body elided —
 * confirm against the full source). */
182 asmlinkage long sparc_pipe(struct pt_regs *regs)
190 regs->u_regs[UREG_I1] = fd[1];
197 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
199 * This is really horribly ugly.
/* Dispatch on 'call': SEM* (call <= SEMCTL), then MSG* (<= MSGCTL),
 * then SHM* (<= SHMCTL). The *CTL variants force IPC_64 semantics. */
202 asmlinkage long sys_ipc(unsigned int call, int first, int second, unsigned long third, void __user *ptr, long fifth)
206 /* No need for backward compatibility. We can start fresh... */
207 if (call <= SEMCTL) {
/* SEMOP is semtimedop with no timeout. */
210 err = sys_semtimedop(first, ptr, second, NULL);
213 err = sys_semtimedop(first, ptr, second,
214 (const struct timespec __user *) fifth);
217 err = sys_semget(first, second, (int)third);
/* semctl's union argument arrives as a userspace pointer-to-pointer. */
225 if (get_user(fourth.__pad,
226 (void __user * __user *) ptr))
228 err = sys_semctl(first, second | IPC_64,
237 if (call <= MSGCTL) {
240 err = sys_msgsnd(first, ptr, second, (int)third);
243 err = sys_msgrcv(first, ptr, second, fifth,
247 err = sys_msgget((key_t) first, second);
250 err = sys_msgctl(first, second | IPC_64, ptr);
257 if (call <= SHMCTL) {
/* shmat: attach, then copy the resulting address back to the
 * user-supplied slot passed in 'third'. */
261 err = do_shmat(first, ptr, second, &raddr);
264 (ulong __user *) third))
270 err = sys_shmdt(ptr);
273 err = sys_shmget(first, second, (int)third);
276 err = sys_shmctl(first, second | IPC_64, ptr);
/* uname(2) wrapper: for PER_LINUX32 tasks, report the machine field as
 * "sparc" (32-bit) instead of the native 64-bit name. */
289 asmlinkage long sparc64_newuname(struct new_utsname __user *name)
291 int ret = sys_newuname(name);
293 if (current->personality == PER_LINUX32 && !ret) {
294 ret = (copy_to_user(name->machine, "sparc\0\0", 8)
/* personality(2) wrapper: a PER_LINUX32 task asking for PER_LINUX stays
 * PER_LINUX32, and a PER_LINUX32 result is translated back (translation
 * target elided here — presumably PER_LINUX; confirm in full source). */
300 asmlinkage long sparc64_personality(unsigned long personality)
304 if (current->personality == PER_LINUX32 &&
305 personality == PER_LINUX)
306 personality = PER_LINUX32;
307 ret = sys_personality(personality);
308 if (ret == PER_LINUX32)
314 /* Linux version of mmap */
/* mmap(2) entry point: resolve the fd (unless MAP_ANONYMOUS), enforce
 * the per-ABI address-space limits (32-bit tasks below 0xf0000000,
 * 64-bit tasks outside the VA hole around PAGE_OFFSET), then defer to
 * do_mmap() under mmap_sem.
 * Fix(review): 'down_write(¤t->' / 'up_write(¤t->' were
 * encoding-corrupted forms of '&current->' (HTML-entity mojibake) and
 * would not compile; restored. */
315 asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
316 unsigned long prot, unsigned long flags, unsigned long fd,
319 struct file * file = NULL;
320 unsigned long retval = -EBADF;
322 if (!(flags & MAP_ANONYMOUS)) {
327 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
328 len = PAGE_ALIGN(len);
331 if (test_thread_flag(TIF_32BIT)) {
/* 32-bit task: both len and any fixed address must fit below 0xf0000000. */
332 if (len > 0xf0000000UL ||
333 ((flags & MAP_FIXED) && addr > 0xf0000000UL - len))
/* 64-bit task: reject ranges that would straddle the VA hole. */
336 if (len > -PAGE_OFFSET ||
337 ((flags & MAP_FIXED) &&
338 addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
342 down_write(&current->mm->mmap_sem);
343 retval = do_mmap(file, addr, len, prot, flags, off);
344 up_write(&current->mm->mmap_sem);
/* 64-bit munmap(2): reject ranges overlapping the VA hole around
 * PAGE_OFFSET, then do_munmap() under mmap_sem.
 * Fix(review): 'down_write(¤t->' / 'up_write(¤t->' were
 * encoding-corrupted forms of '&current->' (HTML-entity mojibake) and
 * would not compile; restored. */
353 asmlinkage long sys64_munmap(unsigned long addr, size_t len)
357 if (len > -PAGE_OFFSET ||
358 (addr < PAGE_OFFSET && addr + len > -PAGE_OFFSET))
360 down_write(&current->mm->mmap_sem);
361 ret = do_munmap(current->mm, addr, len);
362 up_write(&current->mm->mmap_sem);
366 extern unsigned long do_mremap(unsigned long addr,
367 unsigned long old_len, unsigned long new_len,
368 unsigned long flags, unsigned long new_addr);
/* 64-bit mremap(2): validate old/new ranges against the VA hole; when a
 * grow-in-place would cross the hole and MREMAP_MAYMOVE is allowed,
 * pick a fresh target address with get_unmapped_area() and force
 * MREMAP_FIXED before calling the generic do_mremap().
 * Fix(review): 'down_write(¤t->' / 'up_write(¤t->' were
 * encoding-corrupted forms of '&current->' (HTML-entity mojibake) and
 * would not compile; restored. */
370 asmlinkage unsigned long sys64_mremap(unsigned long addr,
371 unsigned long old_len, unsigned long new_len,
372 unsigned long flags, unsigned long new_addr)
374 struct vm_area_struct *vma;
375 unsigned long ret = -EINVAL;
/* 32-bit tasks are handled elsewhere (branch target elided). */
376 if (test_thread_flag(TIF_32BIT))
378 if (old_len > -PAGE_OFFSET || new_len > -PAGE_OFFSET)
380 if (addr < PAGE_OFFSET && addr + old_len > -PAGE_OFFSET)
382 down_write(&current->mm->mmap_sem);
383 if (flags & MREMAP_FIXED) {
384 if (new_addr < PAGE_OFFSET &&
385 new_addr + new_len > -PAGE_OFFSET)
387 } else if (addr < PAGE_OFFSET && addr + new_len > -PAGE_OFFSET) {
388 unsigned long map_flags = 0;
389 struct file *file = NULL;
/* Growing in place would cross the VA hole; relocation required. */
392 if (!(flags & MREMAP_MAYMOVE))
395 vma = find_vma(current->mm, addr);
397 if (vma->vm_flags & VM_SHARED)
398 map_flags |= MAP_SHARED;
402 /* MREMAP_FIXED checked above. */
403 new_addr = get_unmapped_area(file, addr, new_len,
404 vma ? vma->vm_pgoff : 0,
405 map_flags, vma->vm_flags & VM_EXEC);
407 if (new_addr & ~PAGE_MASK)
409 flags |= MREMAP_FIXED;
411 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
413 up_write(&current->mm->mmap_sem);
418 /* we come to here via sys_nis_syscall so it can setup the regs argument */
/* Catch-all for unimplemented SPARC syscalls: logs the syscall number
 * (register %g1) and returns an error (value elided in this extract). */
419 asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
423 /* Don't make the system unusable, if someone goes stuck */
427 printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
428 #ifdef DEBUG_UNIMP_SYSCALL
435 /* #define DEBUG_SPARC_BREAKPOINT */
/* Breakpoint trap handler: normalize PC/nPC to 32 bits for compat
 * tasks, then deliver SIGTRAP/TRAP_BRKPT to the current process with
 * the faulting PC as si_addr. */
437 asmlinkage void sparc_breakpoint(struct pt_regs *regs)
441 if (test_thread_flag(TIF_32BIT)) {
442 regs->tpc &= 0xffffffff;
443 regs->tnpc &= 0xffffffff;
445 #ifdef DEBUG_SPARC_BREAKPOINT
446 printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
448 info.si_signo = SIGTRAP;
450 info.si_code = TRAP_BRKPT;
451 info.si_addr = (void __user *)regs->tpc;
453 force_sig_info(SIGTRAP, &info, current);
454 #ifdef DEBUG_SPARC_BREAKPOINT
455 printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
459 extern void check_pending(int signum);
/* getdomainname(2): copy the (vserver-virtualized, per vx_new_uts)
 * NIS domain name to userspace, clamped to __NEW_UTS_LEN.
 * NOTE(review): the clamping logic between nlen and len is partly
 * elided here — confirm against the full source. */
461 asmlinkage long sys_getdomainname(char __user *name, int len)
468 nlen = strlen(vx_new_uts(domainname)) + 1;
472 if (len > __NEW_UTS_LEN)
474 if (copy_to_user(name, vx_new_uts(domainname), len))
/* Stub reached when a Solaris-emulation trap fires without the solaris
 * module loaded: advance past the trap instruction, warn (rate-limiting
 * logic elided), and kill the task with SIGSEGV. */
482 asmlinkage long solaris_syscall(struct pt_regs *regs)
486 regs->tpc = regs->tnpc;
488 if (test_thread_flag(TIF_32BIT)) {
489 regs->tpc &= 0xffffffff;
490 regs->tnpc &= 0xffffffff;
493 printk ("For Solaris binary emulation you need solaris module loaded\n");
496 send_sig(SIGSEGV, current, 1);
501 #ifndef CONFIG_SUNOS_EMUL
/* Same pattern as solaris_syscall, but for SunOS traps when SunOS
 * emulation was not compiled in: skip the trap, warn, SIGSEGV. */
502 asmlinkage long sunos_syscall(struct pt_regs *regs)
506 regs->tpc = regs->tnpc;
508 if (test_thread_flag(TIF_32BIT)) {
509 regs->tpc &= 0xffffffff;
510 regs->tnpc &= 0xffffffff;
513 printk ("SunOS binary emulation not compiled in\n");
514 force_sig(SIGSEGV, current);
/* Install (or query) a userspace trap handler for trap 'type'.
 * The per-thread utraps table is allocated lazily; slot [0] appears to
 * act as a share/reference count — when > 1 the table is copied before
 * modification (copy-on-write). UTH_NOCHANGE means "only read back the
 * old handlers into old_p/old_d". */
520 asmlinkage long sys_utrap_install(utrap_entry_t type,
521 utrap_handler_t new_p,
522 utrap_handler_t new_d,
523 utrap_handler_t __user *old_p,
524 utrap_handler_t __user *old_d)
526 if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
/* Query-only path: report current handlers without modifying state. */
528 if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
530 if (!current_thread_info()->utraps) {
531 if (put_user(NULL, old_p))
534 if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
539 if (put_user(NULL, old_d))
/* Install path: allocate the table on first use (slot 0 = count 1,
 * remaining slots zeroed). */
544 if (!current_thread_info()->utraps) {
545 current_thread_info()->utraps =
546 kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
547 if (!current_thread_info()->utraps)
549 current_thread_info()->utraps[0] = 1;
550 memset(current_thread_info()->utraps+1, 0,
551 UT_TRAP_INSTRUCTION_31*sizeof(long));
/* Shared table and the handler actually changes: copy before write. */
553 if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
554 current_thread_info()->utraps[0] > 1) {
555 long *p = current_thread_info()->utraps;
557 current_thread_info()->utraps =
558 kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
/* Allocation failed: restore the old shared table and bail. */
560 if (!current_thread_info()->utraps) {
561 current_thread_info()->utraps = p;
565 current_thread_info()->utraps[0] = 1;
566 memcpy(current_thread_info()->utraps+1, p+1,
567 UT_TRAP_INSTRUCTION_31*sizeof(long));
/* Hand the previous handlers back before overwriting. */
571 if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
575 if (put_user(NULL, old_d))
578 current_thread_info()->utraps[type] = (long)new_p;
/* Set the memory-ordering model in %tstate: replace the TSTATE_MM
 * field with 'model' (shifted to bit 14, the MM field position).
 * Validation of 'model' is elided in this extract. */
583 long sparc_memory_ordering(unsigned long model, struct pt_regs *regs)
587 regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
/* SPARC rt_sigaction: identical to the generic one except for the
 * extra 'restorer' argument, stashed in new_ka.ka_restorer (the
 * trampoline userspace returns through after the handler). */
591 asmlinkage long sys_rt_sigaction(int sig,
592 const struct sigaction __user *act,
593 struct sigaction __user *oact,
594 void __user *restorer,
597 struct k_sigaction new_ka, old_ka;
600 /* XXX: Don't preclude handling different sized sigset_t's. */
601 if (sigsetsize != sizeof(sigset_t))
605 new_ka.ka_restorer = restorer;
606 if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
610 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
613 if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
620 /* Invoked by rtrap code to update performance counters in
/* Fold the hardware PIC value ('pic' — read elided in this extract,
 * presumably via read_pic()) into the per-thread kernel counters:
 * low 32 bits into cntd0, high 32 into cntd1, and mirror the running
 * totals to the user-visible counter locations. __put_user failures
 * are deliberately ignored on this path. */
623 asmlinkage void update_perfctrs(void)
625 unsigned long pic, tmp;
628 tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
629 __put_user(tmp, current_thread_info()->user_cntd0);
630 tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
631 __put_user(tmp, current_thread_info()->user_cntd1);
635 asmlinkage long sys_perfctr(int opcode, unsigned long arg0, unsigned long arg1, unsigned long arg2)
641 current_thread_info()->pcr_reg = arg2;
642 current_thread_info()->user_cntd0 = (u64 __user *) arg0;
643 current_thread_info()->user_cntd1 = (u64 __user *) arg1;
644 current_thread_info()->kernel_cntd0 =
645 current_thread_info()->kernel_cntd1 = 0;
648 set_thread_flag(TIF_PERFCTR);
653 if (test_thread_flag(TIF_PERFCTR)) {
654 current_thread_info()->user_cntd0 =
655 current_thread_info()->user_cntd1 = NULL;
656 current_thread_info()->pcr_reg = 0;
658 clear_thread_flag(TIF_PERFCTR);
664 unsigned long pic, tmp;
666 if (!test_thread_flag(TIF_PERFCTR)) {
671 tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
672 err |= __put_user(tmp, current_thread_info()->user_cntd0);
673 tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
674 err |= __put_user(tmp, current_thread_info()->user_cntd1);
680 if (!test_thread_flag(TIF_PERFCTR)) {
684 current_thread_info()->kernel_cntd0 =
685 current_thread_info()->kernel_cntd1 = 0;
689 case PERFCTR_SETPCR: {
690 u64 __user *user_pcr = (u64 __user *)arg0;
692 if (!test_thread_flag(TIF_PERFCTR)) {
696 err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
697 write_pcr(current_thread_info()->pcr_reg);
698 current_thread_info()->kernel_cntd0 =
699 current_thread_info()->kernel_cntd1 = 0;
704 case PERFCTR_GETPCR: {
705 u64 __user *user_pcr = (u64 __user *)arg0;
707 if (!test_thread_flag(TIF_PERFCTR)) {
711 err |= __put_user(current_thread_info()->pcr_reg, user_pcr);