/*
 * linux/arch/x86_64/kernel/sys_x86_64.c
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/vs_cvirt.h>

#include <asm/uaccess.h>
#include <asm/ia32.h>

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
asmlinkage long sys_pipe(int __user *fildes)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	return error;
}

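/*
 * The 64-bit mmap entry point takes the file offset in bytes, unlike
 * the 32-bit mmap2() which takes it in pages, so a non-page-aligned
 * offset is rejected with -EINVAL before it is converted to a page
 * offset for do_mmap_pgoff().
 */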
asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot,
	unsigned long flags, unsigned long fd, unsigned long off)
{
	long error;
	struct file *file;

	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = -EBADF;
	file = NULL;
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}
	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
	up_write(&current->mm->mmap_sem);
	if (file)
		fput(file);
out:
	return error;
}

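/*
 * Pick the virtual address range to search for a free mapping:
 * the compat range for 32-bit (IA32 emulation) tasks, a low 1GB
 * window for MAP_32BIT, or the normal 64-bit range otherwise.
 */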
static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
#ifdef CONFIG_IA32_EMULATION
	if (test_thread_flag(TIF_IA32)) {
		*begin = TASK_UNMAPPED_32;
		*end = IA32_PAGE_OFFSET;
	} else
#endif
	if (flags & MAP_32BIT) {
		/* This is usually needed to map code in the small
		   model, so it has to be in the first 31 bits. Limit
		   it to that. This means we need to move the
		   unmapped base down for this case. This can give
		   conflicts with the heap, but we assume that glibc
		   malloc knows how to fall back to mmap. Give it 1GB
		   of playground for now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
	} else {
		*begin = TASK_UNMAPPED_64;
		*end = TASK_SIZE;
	}
}

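/*
 * First-fit search for a free address range. The search resumes at
 * mm->free_area_cache, where the previous search left off, and wraps
 * around to the start of the range once before failing with -ENOMEM.
 */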
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
	unsigned long begin, end;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	addr = mm->free_area_cache;
	if (addr < begin)
		addr = begin;
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (end - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != begin) {
				start_addr = addr = begin;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		addr = vma->vm_end;
	}
}

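/*
 * Copy the utsname to user space; vx_new_utsname() (from
 * <linux/vs_cvirt.h>) supplies the per-context view of it. 32-bit
 * personalities report an "i686" machine so that legacy binaries
 * see a familiar architecture string.
 */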
asmlinkage long sys_uname(struct new_utsname __user *name)
{
	int err;

	down_read(&uts_sem);
	err = copy_to_user(name, vx_new_utsname(), sizeof(*name));
	up_read(&uts_sem);
	if (personality(current->personality) == PER_LINUX32)
		err |= copy_to_user(&name->machine, "i686", 5);
	return err ? -EFAULT : 0;
}

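/*
 * shmat() must return the attach address on success. do_shmat()
 * returns 0 or a negative error code, so the GNU "?:" extension
 * below yields the error when it is nonzero and raddr otherwise.
 */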
asmlinkage long wrap_sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long raddr;
	return do_shmat(shmid, shmaddr, shmflg, &raddr) ?: (long)raddr;
}

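/*
 * time(2) for 64-bit processes: return the current time in seconds
 * since the Unix epoch, optionally storing it through tloc as well.
 */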
asmlinkage long sys_time64(long __user *tloc)
{
	struct timeval now;
	int i;

	do_gettimeofday(&now);
	i = now.tv_sec;
	if (tloc) {
		if (put_user(i, tloc))
			i = -EFAULT;
	}
	return i;
}