/*
 * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c.
 *
 * Copyright (C) 2000		VA Linux Co
 * Copyright (C) 2000		Don Dugger <n0ano@valinux.com>
 * Copyright (C) 1999		Arun Sharma <arun.sharma@intel.com>
 * Copyright (C) 1997,1998	Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997		David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000-2003	Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2004		Gordon Jin <gordon.jin@intel.com>
 *
 * These routines maintain argument size conversion between 32bit and 64bit
 * environment.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/utsname.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/nfs_fs.h>
#include <linux/quota.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>
#include <linux/nfsd/xdr.h>
#include <linux/nfsd/syscall.h>
#include <linux/poll.h>
#include <linux/eventpoll.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/stat.h>
#include <linux/ipc.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/mman.h>

#include <asm/intrinsics.h>
#include <asm/semaphore.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#if DEBUG
# define DBG(fmt...)	printk(KERN_DEBUG fmt)
#else
# define DBG(fmt...)
#endif

#define A(__x)		((unsigned long)(__x))
#define AA(__x)		((unsigned long)(__x))
#define ROUND_UP(x,a)	((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))
#define NAME_OFFSET(de)	((int) ((de)->d_name - (char *) (de)))

#define OFFSET4K(a)		((a) & 0xfff)
#define PAGE_START(addr)	((addr) & PAGE_MASK)
#define MINSIGSTKSZ_IA32	2048

#define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))
#define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid))
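
/*
 * Illustrative examples (not in the original source) of the helper macros
 * above:
 *
 *    ROUND_UP(0x1235, 4)  == 0x1238    (round up to 4-byte boundary)
 *    OFFSET4K(0x12345)    == 0x345     (offset within a 4KB page)
 *    high2lowuid(70000)   == 65534     (doesn't fit in 16 bits, clamp)
 *    high2lowuid(1000)    == 1000      (fits, passed through)
 */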
extern unsigned long arch_get_unmapped_area (struct file *, unsigned long, unsigned long,
                                             unsigned long, unsigned long);

/*
 * Anything that modifies or inspects ia32 user virtual memory must hold this semaphore
 * while doing so.
 */
/* XXX make per-mm: */
static DECLARE_MUTEX(ia32_mmap_sem);
asmlinkage long
sys32_execve (char *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp, struct pt_regs *regs)
{
    long error;
    char *filename;
    unsigned long old_map_base, old_task_size, tssd;

    filename = getname(name);
    error = PTR_ERR(filename);
    if (IS_ERR(filename))
        return error;

    old_map_base  = current->thread.map_base;
    old_task_size = current->thread.task_size;
    tssd = ia64_get_kr(IA64_KR_TSSD);

    /* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */
    current->thread.map_base  = DEFAULT_MAP_BASE;
    current->thread.task_size = DEFAULT_TASK_SIZE;
    ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
    ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);

    error = compat_do_execve(filename, argv, envp, regs);
    putname(filename);

    if (error < 0) {
        /* oops, execve failed, switch back to old values... */
        ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
        ia64_set_kr(IA64_KR_TSSD, tssd);
        current->thread.map_base  = old_map_base;
        current->thread.task_size = old_task_size;
    }

    return error;
}

int cp_compat_stat(struct kstat *stat, struct compat_stat *ubuf)
{
    int err;

    if ((u64) stat->size > MAX_NON_LFS ||
        !old_valid_dev(stat->dev) ||
        !old_valid_dev(stat->rdev))
        return -EOVERFLOW;

    if (clear_user(ubuf, sizeof(*ubuf)))
        return -EFAULT;

    err  = __put_user(old_encode_dev(stat->dev), &ubuf->st_dev);
    err |= __put_user(stat->ino, &ubuf->st_ino);
    err |= __put_user(stat->mode, &ubuf->st_mode);
    err |= __put_user(stat->nlink, &ubuf->st_nlink);
    err |= __put_user(high2lowuid(stat->uid), &ubuf->st_uid);
    err |= __put_user(high2lowgid(stat->gid), &ubuf->st_gid);
    err |= __put_user(old_encode_dev(stat->rdev), &ubuf->st_rdev);
    err |= __put_user(stat->size, &ubuf->st_size);
    err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime);
    err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec);
    err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime);
    err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec);
    err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime);
    err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec);
    err |= __put_user(stat->blksize, &ubuf->st_blksize);
    err |= __put_user(stat->blocks, &ubuf->st_blocks);
    return err;
}

#if PAGE_SHIFT > IA32_PAGE_SHIFT

static int
get_page_prot (struct vm_area_struct *vma, unsigned long addr)
{
    int prot = 0;

    if (!vma || vma->vm_start > addr)
        return 0;

    if (vma->vm_flags & VM_READ)
        prot |= PROT_READ;
    if (vma->vm_flags & VM_WRITE)
        prot |= PROT_WRITE;
    if (vma->vm_flags & VM_EXEC)
        prot |= PROT_EXEC;
    return prot;
}

/*
 * Map a subpage by creating an anonymous page that contains the union of the old page and
 * the new page.
 */
static unsigned long
mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
              loff_t off)
{
    void *page = NULL;
    struct inode *inode;
    unsigned long ret = 0;
    struct vm_area_struct *vma = find_vma(current->mm, start);
    int old_prot = get_page_prot(vma, start);

    DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
        file, start, end, prot, flags, off);

    /* Optimize the case where the old mmap and the new mmap are both anonymous */
    if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
        if (clear_user((void *) start, end - start)) {
            ret = -EFAULT;
            goto out;
        }
        goto skip_mmap;
    }

    page = (void *) get_zeroed_page(GFP_KERNEL);
    if (!page)
        return -ENOMEM;

    if (old_prot)
        copy_from_user(page, (void *) PAGE_START(start), PAGE_SIZE);

    down_write(&current->mm->mmap_sem);
    {
        ret = do_mmap(0, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
                      flags | MAP_FIXED | MAP_ANONYMOUS, 0);
    }
    up_write(&current->mm->mmap_sem);

    if (IS_ERR((void *) ret))
        goto out;

    /* copy back the old page contents. */
    if (offset_in_page(start))
        copy_to_user((void *) PAGE_START(start), page, offset_in_page(start));
    if (offset_in_page(end))
        copy_to_user((void *) end, page + offset_in_page(end),
                     PAGE_SIZE - offset_in_page(end));

    if (!(flags & MAP_ANONYMOUS)) {
        /* read the file contents */
        inode = file->f_dentry->d_inode;
        if (!inode->i_fop || !file->f_op->read
            || ((*file->f_op->read)(file, (char *) start, end - start, &off) < 0)) {
            ret = -EINVAL;
            goto out;
        }
    }

 skip_mmap:
    if (!(prot & PROT_WRITE))
        ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
 out:
    if (page)
        free_page((unsigned long) page);
    return ret;
}

/* SLAB cache for partial_page structures */
kmem_cache_t *partial_page_cachep;

/*
 * init partial_page_list.
 * return 0 if kmalloc fails.
 */
struct partial_page_list *
ia32_init_pp_list(void)
{
    struct partial_page_list *p;

    if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
        return p;
    p->pp_head = NULL;
    p->ppl_rb = RB_ROOT;
    p->pp_hint = NULL;
    atomic_set(&p->pp_count, 1);
    return p;
}

/*
 * Search for the partial page with @start in partial page list @ppl.
 * If the partial page is found, return it.
 * Else, return 0 and provide @pprev, @rb_link, @rb_parent to
 * be used by later __ia32_insert_pp().
 */
static struct partial_page *
__ia32_find_pp(struct partial_page_list *ppl, unsigned int start,
    struct partial_page **pprev, struct rb_node ***rb_link,
    struct rb_node **rb_parent)
{
    struct partial_page *pp;
    struct rb_node **__rb_link, *__rb_parent, *rb_prev;

    pp = ppl->pp_hint;
    if (pp && pp->base == start)
        return pp;

    __rb_link = &ppl->ppl_rb.rb_node;
    rb_prev = __rb_parent = NULL;

    while (*__rb_link) {
        __rb_parent = *__rb_link;
        pp = rb_entry(__rb_parent, struct partial_page, pp_rb);

        if (pp->base == start) {
            ppl->pp_hint = pp;
            return pp;
        } else if (pp->base < start) {
            rb_prev = __rb_parent;
            __rb_link = &__rb_parent->rb_right;
        } else {
            __rb_link = &__rb_parent->rb_left;
        }
    }

    *rb_link = __rb_link;
    *rb_parent = __rb_parent;
    *pprev = NULL;
    if (rb_prev)
        *pprev = rb_entry(rb_prev, struct partial_page, pp_rb);
    return NULL;
}

/*
 * insert @pp into @ppl.
 */
static void
__ia32_insert_pp(struct partial_page_list *ppl, struct partial_page *pp,
    struct partial_page *prev, struct rb_node **rb_link,
    struct rb_node *rb_parent)
{
    /* link list */
    if (prev) {
        pp->next = prev->next;
        prev->next = pp;
    } else {
        ppl->pp_head = pp;
        if (rb_parent)
            pp->next = rb_entry(rb_parent,
                struct partial_page, pp_rb);
        else
            pp->next = NULL;
    }

    /* link rb */
    rb_link_node(&pp->pp_rb, rb_parent, rb_link);
    rb_insert_color(&pp->pp_rb, &ppl->ppl_rb);

    ppl->pp_hint = pp;
}

/*
 * delete @pp from partial page list @ppl.
 */
static void
__ia32_delete_pp(struct partial_page_list *ppl, struct partial_page *pp,
    struct partial_page *prev)
{
    if (prev) {
        prev->next = pp->next;
        if (ppl->pp_hint == pp)
            ppl->pp_hint = prev;
    } else {
        ppl->pp_head = pp->next;
        if (ppl->pp_hint == pp)
            ppl->pp_hint = pp->next;
    }
    rb_erase(&pp->pp_rb, &ppl->ppl_rb);
    kmem_cache_free(partial_page_cachep, pp);
}

static struct partial_page *
__pp_prev(struct partial_page *pp)
{
    struct rb_node *prev = rb_prev(&pp->pp_rb);
    if (prev)
        return rb_entry(prev, struct partial_page, pp_rb);
    else
        return NULL;
}

/*
 * Delete partial pages with address between @start and @end.
 * @start and @end are page aligned.
 */
static void
__ia32_delete_pp_range(unsigned int start, unsigned int end)
{
    struct partial_page *pp, *prev;
    struct rb_node **rb_link, *rb_parent;

    if (start >= end)
        return;

    pp = __ia32_find_pp(current->thread.ppl, start, &prev,
        &rb_link, &rb_parent);
    if (pp)
        prev = __pp_prev(pp);
    else {
        if (prev)
            pp = prev->next;
        else
            pp = current->thread.ppl->pp_head;
    }

    while (pp && pp->base < end) {
        struct partial_page *tmp = pp->next;
        __ia32_delete_pp(current->thread.ppl, pp, prev);
        pp = tmp;
    }
}

/*
 * Set the range between @start and @end in bitmap.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 */
static int
__ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
    struct partial_page *pp, *prev;
    struct rb_node **rb_link, *rb_parent;
    unsigned int pstart, start_bit, end_bit, i;

    pstart = PAGE_START(start);
    start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
    end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
    if (end_bit == 0)
        end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
    pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
        &rb_link, &rb_parent);
    if (pp) {
        for (i = start_bit; i < end_bit; i++)
            set_bit(i, &pp->bitmap);
        /*
         * Check: if this partial page has been set to a full page,
         * then delete it.
         */
        if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >=
            PAGE_SIZE/IA32_PAGE_SIZE) {
            __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
        }
        return 0;
    }

    /*
     * MAP_FIXED may lead to overlapping mmap.
     * In this case, the requested mmap area may already be mapped as a full
     * page. So check vma before adding a new partial page.
     */
    if (flags & MAP_FIXED) {
        struct vm_area_struct *vma = find_vma(current->mm, pstart);
        if (vma && vma->vm_start <= pstart)
            return 0;
    }

    /* new a partial_page */
    pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
    if (!pp)
        return -ENOMEM;
    pp->base = pstart;
    pp->bitmap = 0;
    for (i = start_bit; i < end_bit; i++)
        set_bit(i, &(pp->bitmap));

    __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
    return 0;
}

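/*
 * Worked example (illustrative, not in the original source; assumes 16KB
 * IA64 pages and 4KB IA32 pages, i.e. four bitmap bits per IA64 page):
 * setting the range [0x1000, 0x3000) inside the IA64 page at 0x0 gives
 *
 *    start_bit = (0x1000 % 0x4000) / 0x1000 = 1
 *    end_bit   = (0x3000 % 0x4000) / 0x1000 = 3
 *
 * so bits 1 and 2 are set and the bitmap becomes 0110.  Once all four
 * bits are set, the partial page is deleted again, because the whole
 * IA64 page is then covered.
 */
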
/*
 * @start and @end should be IA32 page aligned, but don't need to be in the
 * same IA64 page. Split @start and @end to make sure they're in the same IA64
 * page, then call __ia32_set_pp().
 */
static void
ia32_set_pp(unsigned int start, unsigned int end, int flags)
{
    down_write(&current->mm->mmap_sem);
    if (flags & MAP_FIXED) {
        /*
         * MAP_FIXED may lead to overlapping mmap. When this happens,
         * a series of complete IA64 pages results in deletion of
         * old partial pages in that range.
         */
        __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
    }

    if (end < PAGE_ALIGN(start)) {
        __ia32_set_pp(start, end, flags);
    } else {
        if (offset_in_page(start))
            __ia32_set_pp(start, PAGE_ALIGN(start), flags);
        if (offset_in_page(end))
            __ia32_set_pp(PAGE_START(end), end, flags);
    }
    up_write(&current->mm->mmap_sem);
}

/*
 * Unset the range between @start and @end in bitmap.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 * After doing that, if the bitmap is 0, then free the page and return 1,
 * else return 0;
 * If the partial page isn't found in the list, then
 *	if the vma exists, the full page is set to a partial page;
 *	else return -ENOMEM.
 */
static int
__ia32_unset_pp(unsigned int start, unsigned int end)
{
    struct partial_page *pp, *prev;
    struct rb_node **rb_link, *rb_parent;
    unsigned int pstart, start_bit, end_bit, i;
    struct vm_area_struct *vma;

    pstart = PAGE_START(start);
    start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
    end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
    if (end_bit == 0)
        end_bit = PAGE_SIZE / IA32_PAGE_SIZE;

    pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
        &rb_link, &rb_parent);
    if (pp) {
        for (i = start_bit; i < end_bit; i++)
            clear_bit(i, &pp->bitmap);
        if (pp->bitmap == 0) {
            __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
            return 1;
        }
        return 0;
    }

    vma = find_vma(current->mm, pstart);
    if (!vma || vma->vm_start > pstart) {
        return -ENOMEM;
    }

    /* new a partial_page */
    pp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
    if (!pp)
        return -ENOMEM;
    pp->base = pstart;
    pp->bitmap = 0;
    for (i = 0; i < start_bit; i++)
        set_bit(i, &(pp->bitmap));
    for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++)
        set_bit(i, &(pp->bitmap));

    __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
    return 0;
}

/*
 * Delete pp between PAGE_ALIGN(start) and PAGE_START(end) by calling
 * __ia32_delete_pp_range(). Unset possible partial pages by calling
 * __ia32_unset_pp().
 * For the return value, see __ia32_unset_pp().
 */
static int
ia32_unset_pp(unsigned int *startp, unsigned int *endp)
{
    unsigned int start = *startp, end = *endp;
    int ret = 0;

    down_write(&current->mm->mmap_sem);

    __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));

    if (end < PAGE_ALIGN(start)) {
        ret = __ia32_unset_pp(start, end);
        if (ret == 1) {
            *startp = PAGE_START(start);
            *endp = PAGE_ALIGN(end);
        }
        if (ret == 0) {
            /* to shortcut sys_munmap() in sys32_munmap() */
            *startp = PAGE_START(start);
            *endp = PAGE_START(end);
        }
    } else {
        if (offset_in_page(start)) {
            ret = __ia32_unset_pp(start, PAGE_ALIGN(start));
            if (ret == 1)
                *startp = PAGE_START(start);
            if (ret == 0)
                *startp = PAGE_ALIGN(start);
            if (ret < 0)
                goto out;
        }
        if (offset_in_page(end)) {
            ret = __ia32_unset_pp(PAGE_START(end), end);
            if (ret == 1)
                *endp = PAGE_ALIGN(end);
            if (ret == 0)
                *endp = PAGE_START(end);
        }
    }

 out:
    up_write(&current->mm->mmap_sem);
    return ret;
}

/*
 * Compare the range between @start and @end with bitmap in partial page.
 * @start and @end should be IA32 page aligned and in the same IA64 page.
 */
static int
__ia32_compare_pp(unsigned int start, unsigned int end)
{
    struct partial_page *pp, *prev;
    struct rb_node **rb_link, *rb_parent;
    unsigned int pstart, start_bit, end_bit, size;
    unsigned int first_bit, next_zero_bit;	/* the first range in bitmap */

    pstart = PAGE_START(start);

    pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
        &rb_link, &rb_parent);
    if (!pp)
        return 1;

    start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
    end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
    size = sizeof(pp->bitmap) * 8;
    first_bit = find_first_bit(&pp->bitmap, size);
    next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit);
    if ((start_bit < first_bit) || (end_bit > next_zero_bit)) {
        /* exceeds the first range in bitmap */
        return -ENOMEM;
    } else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) {
        first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit);
        if ((next_zero_bit < first_bit) && (first_bit < size))
            return 1;	/* has next range */
        else
            return 0;	/* no next range */
    } else
        return 1;
}

/*
 * @start and @end should be IA32 page aligned, but don't need to be in the
 * same IA64 page. Split @start and @end to make sure they're in the same IA64
 * page, then call __ia32_compare_pp().
 *
 * Take this as example: the range is the 1st and 2nd 4K page.
 * Return 0 if they fit bitmap exactly, i.e. bitmap = 00000011;
 * Return 1 if the range doesn't cover whole bitmap, e.g. bitmap = 00001111;
 * Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or
 *	bitmap = 00000010.
 */
static int
ia32_compare_pp(unsigned int *startp, unsigned int *endp)
{
    unsigned int start = *startp, end = *endp;
    int retval = 0;

    down_write(&current->mm->mmap_sem);

    if (end < PAGE_ALIGN(start)) {
        retval = __ia32_compare_pp(start, end);
        if (retval == 0) {
            *startp = PAGE_START(start);
            *endp = PAGE_ALIGN(end);
        }
    } else {
        if (offset_in_page(start)) {
            retval = __ia32_compare_pp(start,
                PAGE_ALIGN(start));
            if (retval == 0)
                *startp = PAGE_START(start);
            if (retval < 0)
                goto out;
        }
        if (offset_in_page(end)) {
            retval = __ia32_compare_pp(PAGE_START(end), end);
            if (retval == 0)
                *endp = PAGE_ALIGN(end);
        }
    }

 out:
    up_write(&current->mm->mmap_sem);
    return retval;
}

static void
__ia32_drop_pp_list(struct partial_page_list *ppl)
{
    struct partial_page *pp = ppl->pp_head;

    while (pp) {
        struct partial_page *next = pp->next;
        kmem_cache_free(partial_page_cachep, pp);
        pp = next;
    }

    kfree(ppl);
}

void
ia32_drop_partial_page_list(struct task_struct *task)
{
    struct partial_page_list *ppl = task->thread.ppl;

    if (ppl && atomic_dec_and_test(&ppl->pp_count))
        __ia32_drop_pp_list(ppl);
}

/*
 * Copy current->thread.ppl to ppl (already initialized).
 */
static int
__ia32_copy_pp_list(struct partial_page_list *ppl)
{
    struct partial_page *pp, *tmp, *prev;
    struct rb_node **rb_link, *rb_parent;

    ppl->pp_head = NULL;
    ppl->pp_hint = NULL;
    ppl->ppl_rb = RB_ROOT;
    rb_link = &ppl->ppl_rb.rb_node;
    rb_parent = NULL;
    prev = NULL;

    for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
        tmp = kmem_cache_alloc(partial_page_cachep, GFP_KERNEL);
        if (!tmp)
            return -ENOMEM;
        *tmp = *pp;
        __ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent);
        prev = tmp;
        rb_link = &tmp->pp_rb.rb_right;
        rb_parent = &tmp->pp_rb;
    }
    return 0;
}

int
ia32_copy_partial_page_list(struct task_struct *p, unsigned long clone_flags)
{
    int retval = 0;

    if (clone_flags & CLONE_VM) {
        atomic_inc(&current->thread.ppl->pp_count);
        p->thread.ppl = current->thread.ppl;
    } else {
        p->thread.ppl = ia32_init_pp_list();
        if (!p->thread.ppl)
            return -ENOMEM;
        down_write(&current->mm->mmap_sem);
        {
            retval = __ia32_copy_pp_list(p->thread.ppl);
        }
        up_write(&current->mm->mmap_sem);
    }

    return retval;
}

static unsigned long
emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags,
              loff_t off)
{
    unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0;
    struct inode *inode;
    loff_t poff;

    end = start + len;
    pstart = PAGE_START(start);
    pend = PAGE_ALIGN(end);

    if (flags & MAP_FIXED) {
        ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
        if (start > pstart) {
            if (flags & MAP_SHARED)
                printk(KERN_INFO
                       "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
                       current->comm, current->pid, start);
            ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
                               off);
            if (IS_ERR((void *) ret))
                return ret;
            pstart += PAGE_SIZE;
            if (pstart >= pend)
                goto out;	/* done */
        }
        if (end < pend) {
            if (flags & MAP_SHARED)
                printk(KERN_INFO
                       "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
                       current->comm, current->pid, end);
            ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
                               (off + len) - offset_in_page(end));
            if (IS_ERR((void *) ret))
                return ret;
            pend -= PAGE_SIZE;
            if (pstart >= pend)
                goto out;	/* done */
        }
    } else {
        /*
         * If a start address was specified, use it if the entire rounded out area
         * is available.
         */
        if (start && !pstart)
            fudge = 1;	/* handle case of mapping to range (0,PAGE_SIZE) */
        tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
        if (tmp != pstart) {
            pstart = tmp;
            start = pstart + offset_in_page(off);	/* make start congruent with off */
            end = start + len;
            pend = PAGE_ALIGN(end);
        }
    }

    poff = off + (pstart - start);	/* note: (pstart - start) may be negative */
    is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0);

    if ((flags & MAP_SHARED) && !is_congruent)
        printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
               "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off);

    DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
        is_congruent ? "congruent" : "not congruent", poff);

    down_write(&current->mm->mmap_sem);
    {
        if (!(flags & MAP_ANONYMOUS) && is_congruent)
            ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
        else
            ret = do_mmap(0, pstart, pend - pstart,
                          prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
                          flags | MAP_FIXED | MAP_ANONYMOUS, 0);
    }
    up_write(&current->mm->mmap_sem);

    if (IS_ERR((void *) ret))
        return ret;

    if (!is_congruent) {
        /* read the file contents */
        inode = file->f_dentry->d_inode;
        if (!inode->i_fop || !file->f_op->read
            || ((*file->f_op->read)(file, (char *) pstart, pend - pstart, &poff) < 0)) {
            sys_munmap(pstart, pend - pstart);
            return -EINVAL;
        }
        if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
            return -EINVAL;
    }

    if (!(flags & MAP_FIXED))
        ia32_set_pp((unsigned int)start, (unsigned int)end, flags);

 out:
    return start;
}

#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */

static inline unsigned int
get_prot32 (unsigned int prot)
{
    if (prot & PROT_WRITE)
        /* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */
        prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
    else if (prot & (PROT_READ | PROT_EXEC))
        /* on x86, there is no distinction between PROT_READ and PROT_EXEC */
        prot |= (PROT_READ | PROT_EXEC);

    return prot;
}

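/*
 * Illustrative examples (not in the original source):
 *
 *    get_prot32(PROT_WRITE) == PROT_READ | PROT_WRITE | PROT_EXEC
 *    get_prot32(PROT_READ)  == PROT_READ | PROT_EXEC
 *    get_prot32(PROT_NONE)  == PROT_NONE
 */
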
unsigned long
ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
              loff_t offset)
{
    DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
        file, addr, len, prot, flags, offset);

    if (file && (!file->f_op || !file->f_op->mmap))
        return -ENODEV;

    len = IA32_PAGE_ALIGN(len);
    if (len == 0)
        return addr;

    if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
    {
        if (flags & MAP_FIXED)
            return -ENOMEM;
        else
            return -EINVAL;
    }

    if (OFFSET4K(offset))
        return -EINVAL;

    prot = get_prot32(prot);

#if PAGE_SHIFT > IA32_PAGE_SHIFT
    down(&ia32_mmap_sem);
    {
        addr = emulate_mmap(file, addr, len, prot, flags, offset);
    }
    up(&ia32_mmap_sem);
#else
    down_write(&current->mm->mmap_sem);
    {
        addr = do_mmap(file, addr, len, prot, flags, offset);
    }
    up_write(&current->mm->mmap_sem);
#endif
    DBG("ia32_do_mmap: returning 0x%lx\n", addr);
    return addr;
}

/*
 * Linux/i386 didn't use to be able to handle more than 4 system call parameters, so these
 * system calls used a memory block for parameter passing.
 */

struct mmap_arg_struct {
    unsigned int addr;
    unsigned int len;
    unsigned int prot;
    unsigned int flags;
    unsigned int fd;
    unsigned int offset;
};

asmlinkage long
sys32_mmap (struct mmap_arg_struct *arg)
{
    struct mmap_arg_struct a;
    struct file *file = NULL;
    unsigned long addr;
    int flags;

    if (copy_from_user(&a, arg, sizeof(a)))
        return -EFAULT;

    if (OFFSET4K(a.offset))
        return -EINVAL;

    flags = a.flags;

    flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
    if (!(flags & MAP_ANONYMOUS)) {
        file = fget(a.fd);
        if (!file)
            return -EBADF;
    }

    addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset);

    if (file)
        fput(file);
    return addr;
}

asmlinkage long
sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags,
             unsigned int fd, unsigned int pgoff)
{
    struct file *file = NULL;
    unsigned long retval;

    flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
    if (!(flags & MAP_ANONYMOUS)) {
        file = fget(fd);
        if (!file)
            return -EBADF;
    }

    retval = ia32_do_mmap(file, addr, len, prot, flags,
                          (unsigned long) pgoff << IA32_PAGE_SHIFT);

    if (file)
        fput(file);
    return retval;
}

asmlinkage long
sys32_munmap (unsigned int start, unsigned int len)
{
    unsigned int end = start + len;
    long ret;

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
    ret = sys_munmap(start, end - start);
#else
    if (OFFSET4K(start))
        return -EINVAL;

    end = IA32_PAGE_ALIGN(end);
    if (start >= end)
        return -EINVAL;

    ret = ia32_unset_pp(&start, &end);
    if (ret < 0)
        return ret;

    if (start >= end)
        return 0;

    down(&ia32_mmap_sem);
    {
        ret = sys_munmap(start, end - start);
    }
    up(&ia32_mmap_sem);
#endif
    return ret;
}

#if PAGE_SHIFT > IA32_PAGE_SHIFT

/*
 * When mprotect()ing a partial page, we set the permission to the union of the old
 * settings and the new settings. In other words, it's only possible to make access to a
 * partial page less restrictive.
 */
static long
mprotect_subpage (unsigned long address, int new_prot)
{
    int old_prot;
    struct vm_area_struct *vma;

    if (new_prot == PROT_NONE)
        return 0;		/* optimize case where nothing changes... */
    vma = find_vma(current->mm, address);
    old_prot = get_page_prot(vma, address);
    return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
}

#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */

asmlinkage long
sys32_mprotect (unsigned int start, unsigned int len, int prot)
{
    unsigned int end = start + len;
#if PAGE_SHIFT > IA32_PAGE_SHIFT
    long retval = 0;
#endif

    prot = get_prot32(prot);

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
    return sys_mprotect(start, end - start, prot);
#else
    if (OFFSET4K(start))
        return -EINVAL;

    end = IA32_PAGE_ALIGN(end);
    if (end < start)
        return -EINVAL;

    retval = ia32_compare_pp(&start, &end);
    if (retval < 0)
        return retval;

    down(&ia32_mmap_sem);
    {
        if (offset_in_page(start)) {
            /* start address is 4KB aligned but not page aligned. */
            retval = mprotect_subpage(PAGE_START(start), prot);
            if (retval < 0)
                goto out;

            start = PAGE_ALIGN(start);
            if (start >= end)
                goto out;	/* retval is already zero... */
        }

        if (offset_in_page(end)) {
            /* end address is 4KB aligned but not page aligned. */
            retval = mprotect_subpage(PAGE_START(end), prot);
            if (retval < 0)
                goto out;

            end = PAGE_START(end);
        }
        retval = sys_mprotect(start, end - start, prot);
    }
 out:
    up(&ia32_mmap_sem);
    return retval;
#endif
}

asmlinkage long
sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
              unsigned int flags, unsigned int new_addr)
{
    long ret;

#if PAGE_SHIFT <= IA32_PAGE_SHIFT
    ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
#else
    unsigned int old_end, new_end;

    if (OFFSET4K(addr))
        return -EINVAL;

    old_len = IA32_PAGE_ALIGN(old_len);
    new_len = IA32_PAGE_ALIGN(new_len);
    old_end = addr + old_len;
    new_end = addr + new_len;

    if (!new_len)
        return -EINVAL;

    if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr)))
        return -EINVAL;

    if (old_len >= new_len) {
        ret = sys32_munmap(addr + new_len, old_len - new_len);
        if (ret && old_len != new_len)
            return ret;
        ret = addr;
        if (!(flags & MREMAP_FIXED) || (new_addr == addr))
            return ret;
        old_len = new_len;
    }

    addr = PAGE_START(addr);
    old_len = PAGE_ALIGN(old_end) - addr;
    new_len = PAGE_ALIGN(new_end) - addr;

    down(&ia32_mmap_sem);
    {
        ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
    }
    up(&ia32_mmap_sem);

    if ((ret >= 0) && (old_len < new_len)) {
        /* mremap expanded successfully */
        ia32_set_pp(old_end, new_end, flags);
    }
#endif
    return ret;
}

asmlinkage long
sys32_pipe (int *fd)
{
    int retval;
    int fds[2];

    retval = do_pipe(fds);
    if (retval)
        goto out;
    if (copy_to_user(fd, fds, sizeof(fds)))
        retval = -EFAULT;
 out:
    return retval;
}

static inline long
get_tv32 (struct timeval *o, struct compat_timeval *i)
{
    return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
            (__get_user(o->tv_sec, &i->tv_sec) | __get_user(o->tv_usec, &i->tv_usec)));
}

static inline long
put_tv32 (struct compat_timeval *o, struct timeval *i)
{
    return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
            (__put_user(i->tv_sec, &o->tv_sec) | __put_user(i->tv_usec, &o->tv_usec)));
}

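/*
 * Illustrative note (not in the original source): these helpers convert
 * between the 64-bit kernel struct timeval and the 32-bit ia32 layout.
 * A value such as { tv_sec = 1000000000, tv_usec = 500000 } round-trips
 * unchanged, since both fields fit in 32 bits; each helper returns nonzero
 * if the user pointer is inaccessible or an access faults.
 */
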
asmlinkage unsigned long
sys32_alarm (unsigned int seconds)
{
    struct itimerval it_new, it_old;
    unsigned int oldalarm;

    it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
    it_new.it_value.tv_sec = seconds;
    it_new.it_value.tv_usec = 0;
    do_setitimer(ITIMER_REAL, &it_new, &it_old);
    oldalarm = it_old.it_value.tv_sec;
    /* ehhh.. We can't return 0 if we have an alarm pending.. */
    /* And we'd better return too much than too little anyway */
    if (it_old.it_value.tv_usec)
        oldalarm++;
    return oldalarm;
}

/* Translations due to time_t size differences. Which affects all
   sorts of things, like timeval and itimerval. */

extern struct timezone sys_tz;

asmlinkage long
sys32_gettimeofday (struct compat_timeval *tv, struct timezone *tz)
{
    if (tv) {
        struct timeval ktv;
        do_gettimeofday(&ktv);
        if (put_tv32(tv, &ktv))
            return -EFAULT;
    }
    if (tz) {
        if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
            return -EFAULT;
    }
    return 0;
}

asmlinkage long
sys32_settimeofday (struct compat_timeval *tv, struct timezone *tz)
{
    struct timeval ktv;
    struct timespec kts;
    struct timezone ktz;

    if (tv) {
        if (get_tv32(&ktv, tv))
            return -EFAULT;
        kts.tv_sec = ktv.tv_sec;
        kts.tv_nsec = ktv.tv_usec * 1000;
    }
    if (tz) {
        if (copy_from_user(&ktz, tz, sizeof(ktz)))
            return -EFAULT;
    }

    return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}

struct getdents32_callback {
    struct compat_dirent *current_dir;
    struct compat_dirent *previous;
    int count;
    int error;
};

struct readdir32_callback {
    struct old_linux32_dirent *dirent;
    int count;
};

static int
filldir32 (void *__buf, const char *name, int namlen, loff_t offset, ino_t ino,
           unsigned int d_type)
{
    struct compat_dirent *dirent;
    struct getdents32_callback *buf = (struct getdents32_callback *) __buf;
    int reclen = ROUND_UP(NAME_OFFSET(dirent) + namlen + 1, 4);

    buf->error = -EINVAL;	/* only used if we fail.. */
    if (reclen > buf->count)
        return -EINVAL;
    buf->error = -EFAULT;	/* only used if we fail.. */
    dirent = buf->previous;
    if (dirent)
        if (put_user(offset, &dirent->d_off))
            return -EFAULT;
    dirent = buf->current_dir;
    buf->previous = dirent;
    if (put_user(ino, &dirent->d_ino)
        || put_user(reclen, &dirent->d_reclen)
        || copy_to_user(dirent->d_name, name, namlen)
        || put_user(0, dirent->d_name + namlen))
        return -EFAULT;
    dirent = (struct compat_dirent *) ((char *) dirent + reclen);
    buf->current_dir = dirent;
    buf->count -= reclen;
    return 0;
}

asmlinkage long
sys32_getdents (unsigned int fd, struct compat_dirent *dirent, unsigned int count)
{
    struct file *file;
    int error;
    struct compat_dirent *lastdirent;
    struct getdents32_callback buf;

    error = -EBADF;
    file = fget(fd);
    if (!file)
        goto out;

    buf.current_dir = dirent;
    buf.previous = NULL;
    buf.count = count;
    buf.error = 0;

    error = vfs_readdir(file, filldir32, &buf);
    if (error < 0)
        goto out_putf;
    error = buf.error;
    lastdirent = buf.previous;
    if (lastdirent) {
        if (put_user(file->f_pos, &lastdirent->d_off))
            error = -EFAULT;
        else
            error = count - buf.count;
    }

 out_putf:
    fput(file);
 out:
    return error;
}

static int
fillonedir32 (void *__buf, const char *name, int namlen, loff_t offset, ino_t ino,
              unsigned int d_type)
{
    struct readdir32_callback *buf = (struct readdir32_callback *) __buf;
    struct old_linux32_dirent *dirent;

    if (buf->count)
        return -EINVAL;
    buf->count++;
    dirent = buf->dirent;
    if (put_user(ino, &dirent->d_ino)
        || put_user(offset, &dirent->d_offset)
        || put_user(namlen, &dirent->d_namlen)
        || copy_to_user(dirent->d_name, name, namlen)
        || put_user(0, dirent->d_name + namlen))
        return -EFAULT;
    return 0;
}

asmlinkage long
sys32_readdir (unsigned int fd, void *dirent, unsigned int count)
{
    int error;
    struct file *file;
    struct readdir32_callback buf;

    error = -EBADF;
    file = fget(fd);
    if (!file)
        goto out;

    buf.count = 0;
    buf.dirent = dirent;

    error = vfs_readdir(file, fillonedir32, &buf);
    if (error >= 0)
        error = buf.count;
    fput(file);
 out:
    return error;
}

struct sel_arg_struct {
    unsigned int n;
    unsigned int inp;
    unsigned int outp;
    unsigned int exp;
    unsigned int tvp;
};

asmlinkage long
sys32_old_select (struct sel_arg_struct *arg)
{
    struct sel_arg_struct a;

    if (copy_from_user(&a, arg, sizeof(a)))
        return -EFAULT;
    return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
                             compat_ptr(a.exp), compat_ptr(a.tvp));
}

#define SEMOP		 1
#define SEMGET		 2
#define SEMCTL		 3
#define SEMTIMEDOP	 4
#define MSGSND		11
#define MSGRCV		12
#define MSGGET		13
#define MSGCTL		14
#define SHMAT		21
#define SHMDT		22
#define SHMGET		23
#define SHMCTL		24

asmlinkage long
sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
    int version;

    version = call >> 16;	/* hack for backward compatibility */
    call &= 0xffff;

    switch (call) {
    case SEMTIMEDOP:
        if (fifth)
            return compat_sys_semtimedop(first, compat_ptr(ptr),
                                         second, compat_ptr(fifth));
        /* else fall through for normal semop() */
    case SEMOP:
        /* struct sembuf is the same on 32 and 64bit :)) */
        return sys_semtimedop(first, compat_ptr(ptr), second, NULL);
    case SEMGET:
        return sys_semget(first, second, third);
    case SEMCTL:
        return compat_sys_semctl(first, second, third, compat_ptr(ptr));
    case MSGSND:
        return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
    case MSGRCV:
        return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr));
    case MSGGET:
        return sys_msgget((key_t) first, second);
    case MSGCTL:
        return compat_sys_msgctl(first, second, compat_ptr(ptr));
    case SHMAT:
        return compat_sys_shmat(first, second, third, version, compat_ptr(ptr));
    case SHMDT:
        return sys_shmdt(compat_ptr(ptr));
    case SHMGET:
        return sys_shmget(first, second, third);
    case SHMCTL:
        return compat_sys_shmctl(first, second, compat_ptr(ptr));
    default:
        return -ENOSYS;
    }
}

/*
 * sys_time() can be implemented in user-level using
 * sys_gettimeofday(). IA64 did this but i386 Linux did not
 * so we have to implement this system call here.
 */
asmlinkage long
sys32_time (int *tloc)
{
    int i;
    struct timeval tv;

    do_gettimeofday(&tv);
    i = tv.tv_sec;

    if (tloc) {
        if (put_user(i, tloc))
            i = -EFAULT;
    }
    return i;
}

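/*
 * Illustrative sketch (not part of the original source): the user-level
 * equivalent that IA64 used instead of a dedicated time() syscall --
 * derive the seconds value from gettimeofday():
 *
 *    struct timeval tv;
 *    gettimeofday(&tv, NULL);
 *    return tv.tv_sec;    // what time(NULL) would return
 */
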
extern asmlinkage long
compat_sys_wait4 (compat_pid_t pid, compat_uint_t *stat_addr, int options,
                  struct compat_rusage *ru);

asmlinkage long
sys32_waitpid (int pid, unsigned int *stat_addr, int options)
{
    return compat_sys_wait4(pid, stat_addr, options, NULL);
}

static unsigned int
ia32_peek (struct pt_regs *regs, struct task_struct *child, unsigned long addr, unsigned int *val)
{
    size_t copied;
    unsigned int ret;

    copied = access_process_vm(child, addr, val, sizeof(*val), 0);
    return (copied != sizeof(ret)) ? -EIO : 0;
}

static unsigned int
ia32_poke (struct pt_regs *regs, struct task_struct *child, unsigned long addr, unsigned int val)
{
    if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
        return -EIO;
    return 0;
}

/*
 * The order in which registers are stored in the ptrace regs structure
 */
#define PT_EBX	 0
#define PT_ECX	 1
#define PT_EDX	 2
#define PT_ESI	 3
#define PT_EDI	 4
#define PT_EBP	 5
#define PT_EAX	 6
#define PT_DS	 7
#define PT_ES	 8
#define PT_FS	 9
#define PT_GS	10
#define PT_ORIG_EAX 11
#define PT_EIP	12
#define PT_CS	13
#define PT_EFL	14
#define PT_UESP	15
#define PT_SS	16

static unsigned int
getreg (struct task_struct *child, int regno)
{
    struct pt_regs *child_regs;

    child_regs = ia64_task_regs(child);
    switch (regno / sizeof(int)) {
    case PT_EBX: return child_regs->r11;
    case PT_ECX: return child_regs->r9;
    case PT_EDX: return child_regs->r10;
    case PT_ESI: return child_regs->r14;
    case PT_EDI: return child_regs->r15;
    case PT_EBP: return child_regs->r13;
    case PT_EAX: return child_regs->r8;
    case PT_ORIG_EAX: return child_regs->r1;	/* see dispatch_to_ia32_handler() */
    case PT_EIP: return child_regs->cr_iip;
    case PT_UESP: return child_regs->r12;
    case PT_EFL: return child->thread.eflag;
    case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
        return __USER_DS;
    case PT_CS: return __USER_CS;
    default:
        printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno);
        break;
    }
    return 0;
}

static void
putreg (struct task_struct *child, int regno, unsigned int value)
{
    struct pt_regs *child_regs;

    child_regs = ia64_task_regs(child);
    switch (regno / sizeof(int)) {
    case PT_EBX: child_regs->r11 = value; break;
    case PT_ECX: child_regs->r9 = value; break;
    case PT_EDX: child_regs->r10 = value; break;
    case PT_ESI: child_regs->r14 = value; break;
    case PT_EDI: child_regs->r15 = value; break;
    case PT_EBP: child_regs->r13 = value; break;
    case PT_EAX: child_regs->r8 = value; break;
    case PT_ORIG_EAX: child_regs->r1 = value; break;
    case PT_EIP: child_regs->cr_iip = value; break;
    case PT_UESP: child_regs->r12 = value; break;
    case PT_EFL: child->thread.eflag = value; break;
    case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
        if (value != __USER_DS)
            printk(KERN_ERR
                   "ia32.putreg: attempt to set invalid segment register %d = %x\n",
                   regno, value);
        break;
    case PT_CS:
        if (value != __USER_CS)
            printk(KERN_ERR
                   "ia32.putreg: attempt to set invalid segment register %d = %x\n",
                   regno, value);
        break;
    default:
        printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno);
        break;
    }
}

static void
put_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switch_stack *swp,
           int tos)
{
    struct _fpreg_ia32 *f;
    char buf[32];

    f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
    if ((regno += tos) >= 8)
        regno -= 8;
    switch (regno) {
    case 0:
        ia64f2ia32f(f, &ptp->f8);
        break;
    case 1:
        ia64f2ia32f(f, &ptp->f9);
        break;
    case 2:
        ia64f2ia32f(f, &ptp->f10);
        break;
    case 3:
        ia64f2ia32f(f, &ptp->f11);
        break;
    case 4:
    case 5:
    case 6:
    case 7:
        ia64f2ia32f(f, &swp->f12 + (regno - 4));
        break;
    }
    copy_to_user(reg, f, sizeof(*reg));
}

static void
get_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switch_stack *swp,
           int tos)
{
    if ((regno += tos) >= 8)
        regno -= 8;
    switch (regno) {
    case 0:
        copy_from_user(&ptp->f8, reg, sizeof(*reg));
        break;
    case 1:
        copy_from_user(&ptp->f9, reg, sizeof(*reg));
        break;
    case 2:
        copy_from_user(&ptp->f10, reg, sizeof(*reg));
        break;
    case 3:
        copy_from_user(&ptp->f11, reg, sizeof(*reg));
        break;
    case 4:
    case 5:
    case 6:
    case 7:
        copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
        break;
    }
}

int
save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct *save)
{
    struct switch_stack *swp;
    struct pt_regs *ptp;
    int i, tos;

    if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
        return -EFAULT;

    __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
    __put_user(tsk->thread.fsr & 0xffff, &save->swd);
    __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
    __put_user(tsk->thread.fir, &save->fip);
    __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
    __put_user(tsk->thread.fdr, &save->foo);
    __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);

    /*
     *  Stack frames start with 16-bytes of temp space
     */
    swp = (struct switch_stack *)(tsk->thread.ksp + 16);
    ptp = ia64_task_regs(tsk);
    tos = (tsk->thread.fsr >> 11) & 7;
    for (i = 0; i < 8; i++)
        put_fpreg(i, &save->st_space[i], ptp, swp, tos);
    return 0;
}

int
restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct *save)
{
    struct switch_stack *swp;
    struct pt_regs *ptp;
    int i, tos;
    unsigned int fsrlo, fsrhi, num32;

    if (!access_ok(VERIFY_READ, save, sizeof(*save)))
        return -EFAULT;

    __get_user(num32, (unsigned int *)&save->cwd);
    tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
    __get_user(fsrlo, (unsigned int *)&save->swd);
    __get_user(fsrhi, (unsigned int *)&save->twd);
    num32 = (fsrhi << 16) | fsrlo;
    tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
    __get_user(num32, (unsigned int *)&save->fip);
    tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
    __get_user(num32, (unsigned int *)&save->foo);
    tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;

    /*
     *  Stack frames start with 16-bytes of temp space
     */
    swp = (struct switch_stack *)(tsk->thread.ksp + 16);
    ptp = ia64_task_regs(tsk);
    tos = (tsk->thread.fsr >> 11) & 7;
    for (i = 0; i < 8; i++)
        get_fpreg(i, &save->st_space[i], ptp, swp, tos);
    return 0;
}

int
save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct *save)
{
    struct switch_stack *swp;
    struct pt_regs *ptp;
    int i, tos;
    unsigned long mxcsr = 0;
    unsigned long num128[2];

    if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
        return -EFAULT;

    __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
    __put_user(tsk->thread.fsr & 0xffff, &save->swd);
    __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
    __put_user(tsk->thread.fir, &save->fip);
    __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
    __put_user(tsk->thread.fdr, &save->foo);
    __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);

    /*
     *  Stack frames start with 16-bytes of temp space
     */
    swp = (struct switch_stack *)(tsk->thread.ksp + 16);
    ptp = ia64_task_regs(tsk);
    tos = (tsk->thread.fsr >> 11) & 7;
    for (i = 0; i < 8; i++)
        put_fpreg(i, (struct _fpreg_ia32 *)&save->st_space[4*i], ptp, swp, tos);

    mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f);
    __put_user(mxcsr & 0xffff, &save->mxcsr);
    for (i = 0; i < 8; i++) {
        memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long));
        memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long));
        copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32));
    }
    return 0;
}

int
restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct *save)
{
    struct switch_stack *swp;
    struct pt_regs *ptp;
    int i, tos;
    unsigned int fsrlo, fsrhi, num32;
    int mxcsr;
    unsigned long num64;
    unsigned long num128[2];

    if (!access_ok(VERIFY_READ, save, sizeof(*save)))
        return -EFAULT;

    __get_user(num32, (unsigned int *)&save->cwd);
    tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
    __get_user(fsrlo, (unsigned int *)&save->swd);
    __get_user(fsrhi, (unsigned int *)&save->twd);
    num32 = (fsrhi << 16) | fsrlo;
    tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
    __get_user(num32, (unsigned int *)&save->fip);
    tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
    __get_user(num32, (unsigned int *)&save->foo);
    tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;

    /*
     *  Stack frames start with 16-bytes of temp space
     */
    swp = (struct switch_stack *)(tsk->thread.ksp + 16);
    ptp = ia64_task_regs(tsk);
    tos = (tsk->thread.fsr >> 11) & 7;
    for (i = 0; i < 8; i++)
        get_fpreg(i, (struct _fpreg_ia32 *)&save->st_space[4*i], ptp, swp, tos);

    __get_user(mxcsr, (unsigned int *)&save->mxcsr);
    num64 = mxcsr & 0xff10;
    tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000)) | (num64<<32);
    num64 = mxcsr & 0x3f;
    tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000)) | (num64<<32);

    for (i = 0; i < 8; i++) {
        copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32));
        memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long));
        memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long));
    }
    return 0;
}

/*
 * Note that the IA32 version of `ptrace' calls the IA64 routine for
 * many of the requests. This will only work for requests that do
 * not need access to the calling process' `pt_regs' which is located
 * at the address of `stack'. Once we call the IA64 `sys_ptrace' then
 * the address of `stack' will not be the address of the `pt_regs'.
 */
asmlinkage long
sys32_ptrace (int request, pid_t pid, unsigned int addr, unsigned int data,
              long arg4, long arg5, long arg6, long arg7, long stack)
{
    struct pt_regs *regs = (struct pt_regs *) &stack;
    struct task_struct *child;
    unsigned int value, tmp;
    int i, ret;

    lock_kernel();
    if (request == PTRACE_TRACEME) {
        ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
        goto out;
    }

    ret = -ESRCH;
    read_lock(&tasklist_lock);
    child = find_task_by_pid(pid);
    if (child)
        get_task_struct(child);
    read_unlock(&tasklist_lock);
    if (!child)
        goto out;
    ret = -EPERM;
    if (pid == 1)		/* no messing around with init! */
        goto out_tsk;

    if (request == PTRACE_ATTACH) {
        ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
        goto out_tsk;
    }

    ret = ptrace_check_attach(child, request == PTRACE_KILL);
    if (ret < 0)
        goto out_tsk;

    switch (request) {
    case PTRACE_PEEKTEXT:
    case PTRACE_PEEKDATA:	/* read word at location addr */
        ret = ia32_peek(regs, child, addr, &value);
        if (ret == 0)
            ret = put_user(value, (unsigned int *) A(data));
        else
            ret = -EIO;
        goto out_tsk;

    case PTRACE_POKETEXT:
    case PTRACE_POKEDATA:	/* write the word at location addr */
        ret = ia32_poke(regs, child, addr, data);
        goto out_tsk;

    case PTRACE_PEEKUSR:	/* read word at addr in USER area */
        ret = -EIO;
        if ((addr & 3) || addr > 17*sizeof(int))
            break;

        tmp = getreg(child, addr);
        if (!put_user(tmp, (unsigned int *) A(data)))
            ret = 0;
        break;

    case PTRACE_POKEUSR:	/* write word at addr in USER area */
        ret = -EIO;
        if ((addr & 3) || addr > 17*sizeof(int))
            break;

        putreg(child, addr, data);
        ret = 0;
        break;

    case IA32_PTRACE_GETREGS:
        if (!access_ok(VERIFY_WRITE, (int *) A(data), 17*sizeof(int))) {
            ret = -EIO;
            break;
        }
        for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
            put_user(getreg(child, i), (unsigned int *) A(data));
            data += sizeof(int);
        }
        ret = 0;
        break;

    case IA32_PTRACE_SETREGS:
        if (!access_ok(VERIFY_READ, (int *) A(data), 17*sizeof(int))) {
            ret = -EIO;
            break;
        }
        for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int)) {
            get_user(tmp, (unsigned int *) A(data));
            putreg(child, i, tmp);
            data += sizeof(int);
        }
        ret = 0;
        break;

    case IA32_PTRACE_GETFPREGS:
        ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct *) A(data));
        break;

    case IA32_PTRACE_GETFPXREGS:
        ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct *) A(data));
        break;

    case IA32_PTRACE_SETFPREGS:
        ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct *) A(data));
        break;

    case IA32_PTRACE_SETFPXREGS:
        ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct *) A(data));
        break;

    case PTRACE_SYSCALL:	/* continue, stop after next syscall */
    case PTRACE_CONT:		/* restart after signal. */
    case PTRACE_KILL:
    case PTRACE_SINGLESTEP:	/* execute child for one instruction */
    case PTRACE_DETACH:		/* detach a process */
        ret = sys_ptrace(request, pid, addr, data, arg4, arg5, arg6, arg7, stack);
        break;

    default:
        ret = ptrace_request(child, request, addr, data);
        break;
    }
 out_tsk:
    put_task_struct(child);
 out:
    unlock_kernel();
    return ret;
}

/*
 * The IA64 maps 4 I/O ports for each 4K page
 */
#define IOLEN	((65536 / 4) * 4096)
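
/*
 * Illustrative arithmetic (not in the original source): 65536 I/O ports at
 * 4 ports per 4K page means 65536/4 = 16384 pages, so the mapped window is
 * 16384 * 4096 bytes = 64MB.
 */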

asmlinkage long
sys32_iopl (int level)
{
    extern unsigned long ia64_iobase;
    int fd;
    struct file *file;
    unsigned int old;
    unsigned long addr;
    mm_segment_t old_fs = get_fs ();

    if (level != 3)
        return -EINVAL;
    /* Trying to gain more privileges? */
    old = ia64_getreg(_IA64_REG_AR_EFLAG);
    if ((unsigned int) level > ((old >> 12) & 3)) {
        if (!capable(CAP_SYS_RAWIO))
            return -EPERM;
    }
    set_fs(KERNEL_DS);
    fd = sys_open("/dev/mem", O_SYNC | O_RDWR, 0);
    set_fs(old_fs);
    if (fd < 0)
        return fd;

    file = fget(fd);
    if (file == NULL) {
        sys_close(fd);
        return -EFAULT;
    }

    down_write(&current->mm->mmap_sem);
    addr = do_mmap_pgoff(file, IA32_IOBASE,
                         IOLEN, PROT_READ|PROT_WRITE, MAP_SHARED,
                         (ia64_iobase & ~PAGE_OFFSET) >> PAGE_SHIFT);
    up_write(&current->mm->mmap_sem);

    if (addr >= 0) {
        old = (old & ~0x3000) | (level << 12);
        ia64_setreg(_IA64_REG_AR_EFLAG, old);
    }

    fput(file);
    sys_close(fd);
    return 0;
}

asmlinkage long
sys32_ioperm (unsigned int from, unsigned int num, int on)
{
    /*
     *  Since IA64 doesn't have permission bits we'd have to go to
     *  a lot of trouble to simulate them in software. There's
     *  no point, only trusted programs can make this call so we'll
     *  just turn it into an iopl call and let the process have
     *  access to all I/O ports.
     *
     * XXX proper ioperm() support should be emulated by
     *	manipulating the page protections...
     */
    return sys32_iopl(3);
}

typedef struct {
    unsigned int	ss_sp;
    unsigned int	ss_flags;
    unsigned int	ss_size;
} ia32_stack_t;

asmlinkage long
sys32_sigaltstack (ia32_stack_t *uss32, ia32_stack_t *uoss32,
                   long arg2, long arg3, long arg4, long arg5, long arg6, long arg7, long stack)
{
    struct pt_regs *pt = (struct pt_regs *) &stack;
    stack_t uss, uoss;
    ia32_stack_t buf32;
    int ret;
    mm_segment_t old_fs = get_fs();

    if (uss32) {
        if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t)))
            return -EFAULT;
        uss.ss_sp = (void *) (long) buf32.ss_sp;
        uss.ss_flags = buf32.ss_flags;
        /* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the
           check and set it to the user requested value later */
        if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) {
            ret = -ENOMEM;
            goto out;
        }
        uss.ss_size = MINSIGSTKSZ;
    }
    set_fs(KERNEL_DS);
    ret = do_sigaltstack(uss32 ? &uss : NULL, &uoss, pt->r12);
    current->sas_ss_size = buf32.ss_size;
    set_fs(old_fs);
    if (ret < 0)
        return ret;
    if (uoss32) {
        buf32.ss_sp = (long) uoss.ss_sp;
        buf32.ss_flags = uoss.ss_flags;
        buf32.ss_size = uoss.ss_size;
        if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t)))
            return -EFAULT;
    }
 out:
    return ret;
}

asmlinkage int
sys32_pause (void)
{
    current->state = TASK_INTERRUPTIBLE;
    schedule();
    return -ERESTARTNOHAND;
}

asmlinkage long
sys32_msync (unsigned int start, unsigned int len, int flags)
{
    unsigned long addr;

    if (OFFSET4K(start))
        return -EINVAL;
    addr = PAGE_START(start);
    return sys_msync(addr, len + (start - addr), flags);
}

struct sysctl32 {
    unsigned int	name;
    int			nlen;
    unsigned int	oldval;
    unsigned int	oldlenp;
    unsigned int	newval;
    unsigned int	newlen;
    unsigned int	__unused[4];
};

asmlinkage long
sys32_sysctl (struct sysctl32 *args)
{
#ifdef CONFIG_SYSCTL
    struct sysctl32 a32;
    mm_segment_t old_fs = get_fs ();
    void *oldvalp, *newvalp;
    size_t oldlen;
    int *namep;
    long ret;

    if (copy_from_user(&a32, args, sizeof(a32)))
        return -EFAULT;

    /*
     * We need to pre-validate these because we have to disable address checking
     * before calling do_sysctl() because of OLDLEN but we can't run the risk of the
     * user specifying bad addresses here. Well, since we're dealing with 32 bit
     * addresses, we KNOW that access_ok() will always succeed, so this is an
     * expensive NOP, but so what...
     */
    namep = (int *) A(a32.name);
    oldvalp = (void *) A(a32.oldval);
    newvalp = (void *) A(a32.newval);

    if ((oldvalp && get_user(oldlen, (int *) A(a32.oldlenp)))
        || !access_ok(VERIFY_WRITE, namep, 0)
        || !access_ok(VERIFY_WRITE, oldvalp, 0)
        || !access_ok(VERIFY_WRITE, newvalp, 0))
        return -EFAULT;

    set_fs(KERNEL_DS);
    lock_kernel();
    ret = do_sysctl(namep, a32.nlen, oldvalp, &oldlen, newvalp, (size_t) a32.newlen);
    unlock_kernel();
    set_fs(old_fs);

    if (oldvalp && put_user (oldlen, (int *) A(a32.oldlenp)))
        return -EFAULT;

    return ret;
#else
    return -ENOSYS;
#endif
}

asmlinkage long
sys32_newuname (struct new_utsname *name)
{
    int ret = sys_newuname(name);

    if (!ret)
        if (copy_to_user(name->machine, "i686\0\0\0", 8))
            ret = -EFAULT;
    return ret;
}

asmlinkage long
sys32_getresuid16 (u16 *ruid, u16 *euid, u16 *suid)
{
    uid_t a, b, c;
    int ret;
    mm_segment_t old_fs = get_fs();

    set_fs(KERNEL_DS);
    ret = sys_getresuid(&a, &b, &c);
    set_fs(old_fs);

    if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid))
        return -EFAULT;
    return ret;
}

asmlinkage long
sys32_getresgid16 (u16 *rgid, u16 *egid, u16 *sgid)
{
    gid_t a, b, c;
    int ret;
    mm_segment_t old_fs = get_fs();

    set_fs(KERNEL_DS);
    ret = sys_getresgid(&a, &b, &c);
    set_fs(old_fs);

    if (ret)
        return ret;

    return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid);
}

asmlinkage long
sys32_lseek (unsigned int fd, int offset, unsigned int whence)
{
    /* Sign-extension of "offset" is important here... */
    return sys_lseek(fd, offset, whence);
}

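/*
 * Illustrative example (not in the original source): "offset" is a signed
 * 32-bit value, so an ia32 caller passing 0xfffffffc with SEEK_CUR means
 * -4, i.e. seek backwards 4 bytes -- the implicit int-to-loff_t conversion
 * in the sys_lseek() call performs the sign-extension.
 */
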
static int
groups16_to_user(short *grouplist, struct group_info *group_info)
{
    int i;
    short group;

    for (i = 0; i < group_info->ngroups; i++) {
        group = (short)GROUP_AT(group_info, i);
        if (put_user(group, grouplist+i))
            return -EFAULT;
    }

    return 0;
}

static int
groups16_from_user(struct group_info *group_info, short *grouplist)
{
    int i;
    short group;

    for (i = 0; i < group_info->ngroups; i++) {
        if (get_user(group, grouplist+i))
            return -EFAULT;
        GROUP_AT(group_info, i) = (gid_t)group;
    }

    return 0;
}

asmlinkage long
sys32_getgroups16 (int gidsetsize, short *grouplist)
{
    int i;

    if (gidsetsize < 0)
        return -EINVAL;

    get_group_info(current->group_info);
    i = current->group_info->ngroups;
    if (gidsetsize) {
        if (i > gidsetsize) {
            i = -EINVAL;
            goto out;
        }
        if (groups16_to_user(grouplist, current->group_info)) {
            i = -EFAULT;
            goto out;
        }
    }
 out:
    put_group_info(current->group_info);
    return i;
}

asmlinkage long
sys32_setgroups16 (int gidsetsize, short *grouplist)
{
    struct group_info *group_info;
    int retval;

    if (!capable(CAP_SETGID))
        return -EPERM;
    if ((unsigned)gidsetsize > NGROUPS_MAX)
        return -EINVAL;

    group_info = groups_alloc(gidsetsize);
    if (!group_info)
        return -ENOMEM;
    retval = groups16_from_user(group_info, grouplist);
    if (retval) {
        put_group_info(group_info);
        return retval;
    }

    retval = set_current_groups(group_info);
    put_group_info(group_info);

    return retval;
}

asmlinkage long
sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
{
    return sys_truncate((const char *) A(path), ((unsigned long) len_hi << 32) | len_lo);
}

asmlinkage long
sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi)
{
    return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo);
}

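/*
 * Illustrative arithmetic (not in the original source): the 64-bit length
 * arrives from ia32 userland split across two 32-bit arguments.  E.g. for
 * a 6GB length 0x180000000, len_hi = 0x1 and len_lo = 0x80000000, and
 * ((unsigned long) 0x1 << 32) | 0x80000000 reassembles 0x180000000.
 */
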
static int
putstat64 (struct stat64 *ubuf, struct kstat *kbuf)
{
    int err;
    u64 hdev;

    if (clear_user(ubuf, sizeof(*ubuf)))
        return -EFAULT;

    hdev = huge_encode_dev(kbuf->dev);
    err  = __put_user(hdev, (u32*)&ubuf->st_dev);
    err |= __put_user(hdev >> 32, ((u32*)&ubuf->st_dev) + 1);
    err |= __put_user(kbuf->ino, &ubuf->__st_ino);
    err |= __put_user(kbuf->ino, &ubuf->st_ino_lo);
    err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi);
    err |= __put_user(kbuf->mode, &ubuf->st_mode);
    err |= __put_user(kbuf->nlink, &ubuf->st_nlink);
    err |= __put_user(kbuf->uid, &ubuf->st_uid);
    err |= __put_user(kbuf->gid, &ubuf->st_gid);
    hdev = huge_encode_dev(kbuf->rdev);
    err |= __put_user(hdev, (u32*)&ubuf->st_rdev);
    err |= __put_user(hdev >> 32, ((u32*)&ubuf->st_rdev) + 1);
    err |= __put_user(kbuf->size, &ubuf->st_size_lo);
    err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi);
    err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime);
    err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec);
    err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime);
    err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec);
    err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime);
    err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec);
    err |= __put_user(kbuf->blksize, &ubuf->st_blksize);
    err |= __put_user(kbuf->blocks, &ubuf->st_blocks);
    return err;
}

asmlinkage long
sys32_stat64 (char *filename, struct stat64 *statbuf)
{
    struct kstat s;
    long ret = vfs_stat(filename, &s);
    if (!ret)
        ret = putstat64(statbuf, &s);
    return ret;
}

asmlinkage long
sys32_lstat64 (char *filename, struct stat64 *statbuf)
{
    struct kstat s;
    long ret = vfs_lstat(filename, &s);
    if (!ret)
        ret = putstat64(statbuf, &s);
    return ret;
}

asmlinkage long
sys32_fstat64 (unsigned int fd, struct stat64 *statbuf)
{
    struct kstat s;
    long ret = vfs_fstat(fd, &s);
    if (!ret)
        ret = putstat64(statbuf, &s);
    return ret;
}

asmlinkage long
sys32_sysinfo (struct sysinfo32 *info)
{
    int ret, err;
    int bitcount = 0;
    struct sysinfo s;
    mm_segment_t old_fs = get_fs();

    set_fs(KERNEL_DS);
    ret = sys_sysinfo(&s);
    set_fs(old_fs);
    /* Check to see if any memory value is too large for 32-bit and
     * scale down if needed.
     */
    if ((s.totalram >> 32) || (s.totalswap >> 32)) {
        while (s.mem_unit < PAGE_SIZE) {
            s.mem_unit <<= 1;
            bitcount++;
        }
        s.totalram >>= bitcount;
        s.freeram >>= bitcount;
        s.sharedram >>= bitcount;
        s.bufferram >>= bitcount;
        s.totalswap >>= bitcount;
        s.freeswap >>= bitcount;
        s.totalhigh >>= bitcount;
        s.freehigh >>= bitcount;
    }

    if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
        return -EFAULT;

    err  = __put_user(s.uptime, &info->uptime);
    err |= __put_user(s.loads[0], &info->loads[0]);
    err |= __put_user(s.loads[1], &info->loads[1]);
    err |= __put_user(s.loads[2], &info->loads[2]);
    err |= __put_user(s.totalram, &info->totalram);
    err |= __put_user(s.freeram, &info->freeram);
    err |= __put_user(s.sharedram, &info->sharedram);
    err |= __put_user(s.bufferram, &info->bufferram);
    err |= __put_user(s.totalswap, &info->totalswap);
    err |= __put_user(s.freeswap, &info->freeswap);
    err |= __put_user(s.procs, &info->procs);
    err |= __put_user(s.totalhigh, &info->totalhigh);
    err |= __put_user(s.freehigh, &info->freehigh);
    err |= __put_user(s.mem_unit, &info->mem_unit);
    if (err)
        return -EFAULT;
    return ret;
}

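/*
 * Worked example (illustrative, not in the original source; assumes 16KB
 * PAGE_SIZE and an initial mem_unit of 1): a machine with 8GB of RAM has
 * s.totalram = 0x200000000, which does not fit in 32 bits.  The loop
 * doubles mem_unit 14 times (1 -> 16384), so bitcount = 14 and
 * s.totalram >>= 14 yields 0x80000 units of 16KB -- the same total,
 * now representable in the 32-bit sysinfo32 fields.
 */
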
asmlinkage long
sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec *interval)
{
    mm_segment_t old_fs = get_fs();
    struct timespec t;
    long ret;

    set_fs(KERNEL_DS);
    ret = sys_sched_rr_get_interval(pid, &t);
    set_fs(old_fs);
    if (put_compat_timespec(&t, interval))
        return -EFAULT;
    return ret;
}

asmlinkage long
sys32_pread (unsigned int fd, void *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
{
    return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
}

asmlinkage long
sys32_pwrite (unsigned int fd, void *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
{
    return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
}

asmlinkage long
sys32_sendfile (int out_fd, int in_fd, int *offset, unsigned int count)
{
    mm_segment_t old_fs = get_fs();
    long ret;
    off_t of;

    if (offset && get_user(of, offset))
        return -EFAULT;

    set_fs(KERNEL_DS);
    ret = sys_sendfile(out_fd, in_fd, offset ? &of : NULL, count);
    set_fs(old_fs);

    if (!ret && offset && put_user(of, offset))
        return -EFAULT;

    return ret;
}

asmlinkage long
sys32_personality (unsigned int personality)
{
    long ret;

    if (current->personality == PER_LINUX32 && personality == PER_LINUX)
        personality = PER_LINUX32;
    ret = sys_personality(personality);
    if (ret == PER_LINUX32)
        ret = PER_LINUX;
    return ret;
}

asmlinkage unsigned long
sys32_brk (unsigned int brk)
{
    unsigned long ret, obrk;
    struct mm_struct *mm = current->mm;

    obrk = mm->brk;
    ret = sys_brk(brk);
    if (ret < obrk)
        clear_user((void *) ret, PAGE_ALIGN(ret) - ret);
    return ret;
}

/*
 * Exactly like fs/open.c:sys_open(), except that it doesn't set the O_LARGEFILE flag.
 */
asmlinkage long
sys32_open (const char *filename, int flags, int mode)
{
    char *tmp;
    int fd, error;

    tmp = getname(filename);
    fd = PTR_ERR(tmp);
    if (!IS_ERR(tmp)) {
        fd = get_unused_fd();
        if (fd >= 0) {
            struct file *f = filp_open(tmp, flags, mode);
            error = PTR_ERR(f);
            if (IS_ERR(f))
                goto out_error;
            fd_install(fd, f);
        }
 out:
        putname(tmp);
    }
    return fd;

 out_error:
    put_unused_fd(fd);
    fd = error;
    goto out;
}

/* Structure for ia32 emulation on ia64 */
struct epoll_event32
{
    u32 events;
    u32 data[2];
};

asmlinkage long
sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 *event)
{
    mm_segment_t old_fs = get_fs();
    struct epoll_event event64;
    int error = -EFAULT;
    u32 data_halfword;

    if ((error = verify_area(VERIFY_READ, event,
                             sizeof(struct epoll_event32))))
        return error;

    __get_user(event64.events, &event->events);
    __get_user(data_halfword, &event->data[0]);
    event64.data = data_halfword;
    __get_user(data_halfword, &event->data[1]);
    event64.data |= (u64)data_halfword << 32;

    set_fs(KERNEL_DS);
    error = sys_epoll_ctl(epfd, op, fd, &event64);
    set_fs(old_fs);

    return error;
}

asmlinkage long
sys32_epoll_wait(int epfd, struct epoll_event32 *events, int maxevents,
                 int timeout)
{
    struct epoll_event *events64 = NULL;
    mm_segment_t old_fs = get_fs();
    int error, numevents, size;
    int evt_idx;
    int do_free_pages = 0;

    if (maxevents <= 0) {
        return -EINVAL;
    }

    /* Verify that the area passed by the user is writeable */
    if ((error = verify_area(VERIFY_WRITE, events,
                             maxevents * sizeof(struct epoll_event32))))
        return error;

    /*
     * Allocate space for the intermediate copy. If the space needed
     * is large enough to cause kmalloc to fail, then try again with
     * __get_free_pages.
     */
    size = maxevents * sizeof(struct epoll_event);
    events64 = kmalloc(size, GFP_KERNEL);
    if (events64 == NULL) {
        events64 = (struct epoll_event *)
            __get_free_pages(GFP_KERNEL, get_order(size));
        if (events64 == NULL)
            return -ENOMEM;
        do_free_pages = 1;
    }

    /* Do the system call */
    set_fs(KERNEL_DS);	/* copy_to/from_user should work on kernel mem */
    numevents = sys_epoll_wait(epfd, events64, maxevents, timeout);
    set_fs(old_fs);

    /* Don't modify userspace memory if we're returning an error */
    if (numevents > 0) {
        /* Translate the 64-bit structures back into the 32-bit
           structures */
        for (evt_idx = 0; evt_idx < numevents; evt_idx++) {
            __put_user(events64[evt_idx].events,
                       &events[evt_idx].events);
            __put_user((u32)events64[evt_idx].data,
                       &events[evt_idx].data[0]);
            __put_user((u32)(events64[evt_idx].data >> 32),
                       &events[evt_idx].data[1]);
        }
    }

    if (do_free_pages)
        free_pages((unsigned long) events64, get_order(size));
    else
        kfree(events64);
    return numevents;
}

/*
 * Get a yet unused TLS descriptor index.
 */
static int
get_free_idx (void)
{
    struct thread_struct *t = &current->thread;
    int idx;

    for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
        if (desc_empty(t->tls_array + idx))
            return idx + GDT_ENTRY_TLS_MIN;
    return -ESRCH;
}

/*
 * Set a given TLS descriptor:
 */
asmlinkage int
sys32_set_thread_area (struct ia32_user_desc *u_info)
{
    struct thread_struct *t = &current->thread;
    struct ia32_user_desc info;
    struct desc_struct *desc;
    int cpu, idx;

    if (copy_from_user(&info, u_info, sizeof(info)))
        return -EFAULT;
    idx = info.entry_number;

    /*
     * index -1 means the kernel should try to find and allocate an empty descriptor:
     */
    if (idx == -1) {
        idx = get_free_idx();
        if (idx < 0)
            return idx;
        if (put_user(idx, &u_info->entry_number))
            return -EFAULT;
    }

    if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
        return -EINVAL;

    desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;

    cpu = smp_processor_id();

    if (LDT_empty(&info)) {
        desc->a = 0;
        desc->b = 0;
    } else {
        desc->a = LDT_entry_a(&info);
        desc->b = LDT_entry_b(&info);
    }
    load_TLS(t, cpu);
    return 0;
}

2637 * Get the current Thread-Local Storage area:
2640 #define GET_BASE(desc) ( \
2641 (((desc)->a >> 16) & 0x0000ffff) | \
2642 (((desc)->b << 16) & 0x00ff0000) | \
2643 ( (desc)->b & 0xff000000) )
2645 #define GET_LIMIT(desc) ( \
2646 ((desc)->a & 0x0ffff) | \
2647 ((desc)->b & 0xf0000) )
2649 #define GET_32BIT(desc) (((desc)->b >> 22) & 1)
2650 #define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
2651 #define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
2652 #define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
2653 #define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
2654 #define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
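
/*
 * Illustrative example (not in the original source): a descriptor with
 * a = 0x5678ffff and b = 0x12c0f334 encodes base address 0x12345678:
 *
 *    base[15:0]  = a[31:16]  -> 0x5678
 *    base[23:16] = b[7:0]    -> 0x34
 *    base[31:24] = b[31:24]  -> 0x12
 *
 * so GET_BASE() = 0x5678 | 0x340000 | 0x12000000 = 0x12345678.
 */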

asmlinkage int
sys32_get_thread_area (struct ia32_user_desc *u_info)
{
    struct ia32_user_desc info;
    struct desc_struct *desc;
    int idx;

    if (get_user(idx, &u_info->entry_number))
        return -EFAULT;
    if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
        return -EINVAL;

    desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

    info.entry_number = idx;
    info.base_addr = GET_BASE(desc);
    info.limit = GET_LIMIT(desc);
    info.seg_32bit = GET_32BIT(desc);
    info.contents = GET_CONTENTS(desc);
    info.read_exec_only = !GET_WRITABLE(desc);
    info.limit_in_pages = GET_LIMIT_PAGES(desc);
    info.seg_not_present = !GET_PRESENT(desc);
    info.useable = GET_USEABLE(desc);

    if (copy_to_user(u_info, &info, sizeof(info)))
        return -EFAULT;
    return 0;
}

extern asmlinkage long
sys_timer_create(clockid_t which_clock, struct sigevent *timer_event_spec,
                 timer_t *created_timer_id);

asmlinkage long
sys32_timer_create(u32 clock, struct sigevent32 *se32, timer_t *timer_id)
{
    struct sigevent se;
    mm_segment_t oldfs;
    timer_t t;
    long err;

    if (se32 == NULL)
        return sys_timer_create(clock, NULL, timer_id);

    memset(&se, 0, sizeof(struct sigevent));
    if (get_user(se.sigev_value.sival_int, &se32->sigev_value.sival_int) ||
        __get_user(se.sigev_signo, &se32->sigev_signo) ||
        __get_user(se.sigev_notify, &se32->sigev_notify) ||
        __copy_from_user(&se._sigev_un._pad, &se32->_sigev_un._pad,
                         sizeof(se._sigev_un._pad)))
        return -EFAULT;

    if (!access_ok(VERIFY_WRITE, timer_id, sizeof(timer_t)))
        return -EFAULT;

    oldfs = get_fs();
    set_fs(KERNEL_DS);
    err = sys_timer_create(clock, &se, &t);
    set_fs(oldfs);

    if (!err)
        err = __put_user(t, timer_id);

    return err;
}

long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
                        __u32 len_low, __u32 len_high, int advice)
{
    return sys_fadvise64_64(fd,
                            (((u64)offset_high)<<32) | offset_low,
                            (((u64)len_high)<<32) | len_low,
                            advice);
}

#ifdef NOTYET  /* UNTESTED FOR IA64 FROM HERE DOWN */

asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid)
{
    uid_t sruid, seuid;

    sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
    seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
    return sys_setreuid(sruid, seuid);
}

asmlinkage long
sys32_setresuid(compat_uid_t ruid, compat_uid_t euid,
                compat_uid_t suid)
{
    uid_t sruid, seuid, ssuid;

    sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
    seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
    ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid);
    return sys_setresuid(sruid, seuid, ssuid);
}

asmlinkage long
sys32_setregid(compat_gid_t rgid, compat_gid_t egid)
{
    gid_t srgid, segid;

    srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
    segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
    return sys_setregid(srgid, segid);
}

asmlinkage long
sys32_setresgid(compat_gid_t rgid, compat_gid_t egid,
                compat_gid_t sgid)
{
    gid_t srgid, segid, ssgid;

    srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
    segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
    ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid);
    return sys_setresgid(srgid, segid, ssgid);
}

/* Handle adjtimex compatibility. */

struct timex32 {
    u32 modes;
    s32 offset, freq, maxerror, esterror;
    s32 status, constant, precision, tolerance;
    struct compat_timeval time;
    s32 tick;
    s32 ppsfreq, jitter, shift, stabil;
    s32 jitcnt, calcnt, errcnt, stbcnt;
    s32  :32; s32  :32; s32  :32; s32  :32;
    s32  :32; s32  :32; s32  :32; s32  :32;
    s32  :32; s32  :32; s32  :32; s32  :32;
};

extern int do_adjtimex(struct timex *);

asmlinkage long
sys32_adjtimex(struct timex32 *utp)
{
    struct timex txc;
    int ret;

    memset(&txc, 0, sizeof(struct timex));

    if (get_user(txc.modes, &utp->modes) ||
        __get_user(txc.offset, &utp->offset) ||
        __get_user(txc.freq, &utp->freq) ||
        __get_user(txc.maxerror, &utp->maxerror) ||
        __get_user(txc.esterror, &utp->esterror) ||
        __get_user(txc.status, &utp->status) ||
        __get_user(txc.constant, &utp->constant) ||
        __get_user(txc.precision, &utp->precision) ||
        __get_user(txc.tolerance, &utp->tolerance) ||
        __get_user(txc.time.tv_sec, &utp->time.tv_sec) ||
        __get_user(txc.time.tv_usec, &utp->time.tv_usec) ||
        __get_user(txc.tick, &utp->tick) ||
        __get_user(txc.ppsfreq, &utp->ppsfreq) ||
        __get_user(txc.jitter, &utp->jitter) ||
        __get_user(txc.shift, &utp->shift) ||
        __get_user(txc.stabil, &utp->stabil) ||
        __get_user(txc.jitcnt, &utp->jitcnt) ||
        __get_user(txc.calcnt, &utp->calcnt) ||
        __get_user(txc.errcnt, &utp->errcnt) ||
        __get_user(txc.stbcnt, &utp->stbcnt))
        return -EFAULT;

    ret = do_adjtimex(&txc);

    if (put_user(txc.modes, &utp->modes) ||
        __put_user(txc.offset, &utp->offset) ||
        __put_user(txc.freq, &utp->freq) ||
        __put_user(txc.maxerror, &utp->maxerror) ||
        __put_user(txc.esterror, &utp->esterror) ||
        __put_user(txc.status, &utp->status) ||
        __put_user(txc.constant, &utp->constant) ||
        __put_user(txc.precision, &utp->precision) ||
        __put_user(txc.tolerance, &utp->tolerance) ||
        __put_user(txc.time.tv_sec, &utp->time.tv_sec) ||
        __put_user(txc.time.tv_usec, &utp->time.tv_usec) ||
        __put_user(txc.tick, &utp->tick) ||
        __put_user(txc.ppsfreq, &utp->ppsfreq) ||
        __put_user(txc.jitter, &utp->jitter) ||
        __put_user(txc.shift, &utp->shift) ||
        __put_user(txc.stabil, &utp->stabil) ||
        __put_user(txc.jitcnt, &utp->jitcnt) ||
        __put_user(txc.calcnt, &utp->calcnt) ||
        __put_user(txc.errcnt, &utp->errcnt) ||
        __put_user(txc.stbcnt, &utp->stbcnt))
        return -EFAULT;

    return ret;
}

#endif /* NOTYET */