/*
 * linux/arch/sh/kernel/sys_sh.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/SuperH
 * platform.
 *
 * Taken from i386 version.
 */
11 #include <linux/errno.h>
12 #include <linux/sched.h>
14 #include <linux/smp.h>
15 #include <linux/smp_lock.h>
16 #include <linux/sem.h>
17 #include <linux/msg.h>
18 #include <linux/shm.h>
19 #include <linux/stat.h>
20 #include <linux/syscalls.h>
21 #include <linux/mman.h>
22 #include <linux/file.h>
23 #include <linux/utsname.h>
24 #include <linux/vs_cvirt.h>
26 #include <asm/uaccess.h>
/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
33 asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
34 unsigned long r6, unsigned long r7,
#if defined(HAVE_ARCH_UNMAPPED_AREA)
/*
 * To avoid cache aliasing, we map the shared page with the same colour:
 * SH caches are virtually indexed, so two mappings of the same physical
 * page must agree modulo SHMLBA or they will land in different cache
 * lines and go incoherent.
 */
#define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1))

/*
 * Pick an unmapped region of length @len for a new mapping.
 *
 * MAP_FIXED requests are only validated (shared mappings must be
 * SHMLBA-aligned); otherwise we search upward from the requested hint
 * or from the cached free-area position, colour-aligning candidate
 * addresses for shared mappings.  Returns the chosen address or
 * -EINVAL/-ENOMEM.
 */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	/* Try the caller's hint first. */
	if (addr) {
		if (flags & MAP_PRIVATE)
			addr = PAGE_ALIGN(addr);
		else
			addr = COLOUR_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* Cached hole too small for this request: restart from the base. */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = TASK_UNMAPPED_BASE;
	}
	if (flags & MAP_PRIVATE)
		addr = PAGE_ALIGN(mm->free_area_cache);
	else
		addr = COLOUR_ALIGN(mm->free_area_cache);
	start_addr = addr;

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (unlikely(TASK_SIZE - len < addr)) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		/* Track the largest hole we stepped over. */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = vma->vm_end;
		if (!(flags & MAP_PRIVATE))
			addr = COLOUR_ALIGN(addr);
	}
}
#endif /* HAVE_ARCH_UNMAPPED_AREA */
126 do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
127 unsigned long flags, int fd, unsigned long pgoff)
130 struct file *file = NULL;
132 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
133 if (!(flags & MAP_ANONYMOUS)) {
139 down_write(¤t->mm->mmap_sem);
140 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
141 up_write(¤t->mm->mmap_sem);
149 asmlinkage int old_mmap(unsigned long addr, unsigned long len,
150 unsigned long prot, unsigned long flags,
151 int fd, unsigned long off)
153 if (off & ~PAGE_MASK)
155 return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
158 asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
159 unsigned long prot, unsigned long flags,
160 unsigned long fd, unsigned long pgoff)
162 return do_mmap2(addr, len, prot, flags, fd, pgoff);
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
170 asmlinkage int sys_ipc(uint call, int first, int second,
171 int third, void __user *ptr, long fifth)
175 version = call >> 16; /* hack for backward compatibility */
181 return sys_semtimedop(first, (struct sembuf __user *)ptr,
184 return sys_semtimedop(first, (struct sembuf __user *)ptr,
186 (const struct timespec __user *)fifth);
188 return sys_semget (first, second, third);
193 if (get_user(fourth.__pad, (void * __user *) ptr))
195 return sys_semctl (first, second, third, fourth);
204 return sys_msgsnd (first, (struct msgbuf __user *) ptr,
209 struct ipc_kludge tmp;
213 if (copy_from_user(&tmp,
214 (struct ipc_kludge __user *) ptr,
217 return sys_msgrcv (first, tmp.msgp, second,
221 return sys_msgrcv (first,
222 (struct msgbuf __user *) ptr,
223 second, fifth, third);
226 return sys_msgget ((key_t) first, second);
228 return sys_msgctl (first, second,
229 (struct msqid_ds __user *) ptr);
239 ret = do_shmat (first, (char __user *) ptr,
243 return put_user (raddr, (ulong __user *) third);
245 case 1: /* iBCS2 emulator entry point */
246 if (!segment_eq(get_fs(), get_ds()))
248 return do_shmat (first, (char __user *) ptr,
249 second, (ulong *) third);
252 return sys_shmdt ((char __user *)ptr);
254 return sys_shmget (first, second, third);
256 return sys_shmctl (first, second,
257 (struct shmid_ds __user *) ptr);
265 asmlinkage int sys_uname(struct old_utsname * name)
271 err=copy_to_user(name, vx_new_utsname(), sizeof (*name));
273 return err?-EFAULT:0;
276 asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
277 size_t count, long dummy, loff_t pos)
279 return sys_pread64(fd, buf, count, pos);
282 asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
283 size_t count, long dummy, loff_t pos)
285 return sys_pwrite64(fd, buf, count, pos);
288 asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
289 u32 len0, u32 len1, int advice)
291 #ifdef __LITTLE_ENDIAN__
292 return sys_fadvise64_64(fd, (u64)offset1 << 32 | offset0,
293 (u64)len1 << 32 | len0, advice);
295 return sys_fadvise64_64(fd, (u64)offset0 << 32 | offset1,
296 (u64)len0 << 32 | len1, advice);