/*
 * linux/arch/x86_64/kernel/sys_x86_64.c
 */
5 #include <linux/errno.h>
6 #include <linux/sched.h>
7 #include <linux/syscalls.h>
10 #include <linux/smp_lock.h>
11 #include <linux/sem.h>
12 #include <linux/msg.h>
13 #include <linux/shm.h>
14 #include <linux/stat.h>
15 #include <linux/mman.h>
16 #include <linux/file.h>
17 #include <linux/utsname.h>
18 #include <linux/personality.h>
19 #include <linux/vs_cvirt.h>
21 #include <asm/uaccess.h>
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
29 asmlinkage long sys_pipe(int __user *fildes)
36 if (copy_to_user(fildes, fd, 2*sizeof(int)))
42 long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags,
43 unsigned long fd, unsigned long off)
54 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
55 if (!(flags & MAP_ANONYMOUS)) {
60 down_write(¤t->mm->mmap_sem);
61 error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
62 up_write(¤t->mm->mmap_sem);
70 static void find_start_end(unsigned long flags, unsigned long *begin,
73 #ifdef CONFIG_IA32_EMULATION
74 if (test_thread_flag(TIF_IA32)) {
75 *begin = TASK_UNMAPPED_32;
76 *end = IA32_PAGE_OFFSET;
79 if (flags & MAP_32BIT) {
80 /* This is usually used needed to map code in small
81 model, so it needs to be in the first 31bit. Limit
82 it to that. This means we need to move the
83 unmapped base down for this case. This can give
84 conflicts with the heap, but we assume that glibc
85 malloc knows how to fall back to mmap. Give it 1GB
86 of playground for now. -AK */
90 *begin = TASK_UNMAPPED_64;
96 arch_get_unmapped_area(struct file *filp, unsigned long addr,
97 unsigned long len, unsigned long pgoff, unsigned long flags)
99 struct mm_struct *mm = current->mm;
100 struct vm_area_struct *vma;
101 unsigned long start_addr;
102 unsigned long begin, end;
104 find_start_end(flags, &begin, &end);
110 addr = PAGE_ALIGN(addr);
111 vma = find_vma(mm, addr);
112 if (end - len >= addr &&
113 (!vma || addr + len <= vma->vm_start))
116 addr = mm->free_area_cache;
122 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
123 /* At this point: (!vma || addr < vma->vm_end). */
124 if (end - len < addr) {
126 * Start a new search - just in case we missed
129 if (start_addr != begin) {
130 start_addr = addr = begin;
135 if (!vma || addr + len <= vma->vm_start) {
137 * Remember the place where we stopped the search:
139 mm->free_area_cache = addr + len;
147 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
148 const unsigned long len, const unsigned long pgoff,
149 const unsigned long flags)
151 struct vm_area_struct *vma, *prev_vma;
152 struct mm_struct *mm = current->mm;
153 unsigned long base = mm->mmap_base, addr = addr0;
155 unsigned long begin, end;
157 find_start_end(flags, &begin, &end);
159 /* requested length too big for entire address space */
163 /* dont allow allocations above current base */
164 if (mm->free_area_cache > base)
165 mm->free_area_cache = base;
167 /* requesting a specific address */
169 addr = PAGE_ALIGN(addr);
170 vma = find_vma(mm, addr);
171 if (end - len >= addr &&
172 (!vma || addr + len <= vma->vm_start))
177 /* make sure it can fit in the remaining address space */
178 if (mm->free_area_cache < len)
181 /* either no address requested or cant fit in requested address hole */
182 addr = (mm->free_area_cache - len) & PAGE_MASK;
185 * Lookup failure means no vma is above this address,
186 * i.e. return with success:
188 if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
192 * new region fits between prev_vma->vm_end and
193 * vma->vm_start, use it:
195 if (addr+len <= vma->vm_start &&
196 (!prev_vma || (addr >= prev_vma->vm_end)))
197 /* remember the address as a hint for next time */
198 return (mm->free_area_cache = addr);
200 /* pull free_area_cache down to the first hole */
201 if (mm->free_area_cache == vma->vm_end)
202 mm->free_area_cache = vma->vm_start;
204 /* try just below the current vma->vm_start */
205 addr = vma->vm_start-len;
206 } while (len <= vma->vm_start);
210 * if hint left us with no space for the requested
211 * mapping then try again:
214 mm->free_area_cache = base;
219 * A failed mmap() very likely causes application failure,
220 * so fall back to the bottom-up function here. This scenario
221 * can happen with large stack limits and large mmap()
224 mm->free_area_cache = begin;
225 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
227 * Restore the topdown base:
229 mm->free_area_cache = base;
234 asmlinkage long sys_uname(struct new_utsname __user * name)
238 err = copy_to_user(name, vx_new_utsname(), sizeof (*name));
240 if (personality(current->personality) == PER_LINUX32)
241 err |= copy_to_user(&name->machine, "i686", 5);
242 return err ? -EFAULT : 0;
245 asmlinkage long wrap_sys_shmat(int shmid, char __user *shmaddr, int shmflg)
248 return do_shmat(shmid,shmaddr,shmflg,&raddr) ?: (long)raddr;
251 asmlinkage long sys_time64(long __user * tloc)
256 do_gettimeofday(&now);
259 if (put_user(i,tloc))