arch/ia64/ia32/binfmt_elf32.c  (VServer 1.9.2, patch-2.6.8.1-vs1.9.2.diff)
/*
 * IA-32 ELF support.
 *
 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
 * Copyright (C) 2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 06/16/00	A. Mallick	initialize csd/ssd/tssd/cflg for ia32_load_state
 * 04/13/01	D. Mosberger	dropped saving tssd in ar.k1---it's not needed
 * 09/14/01	D. Mosberger	fixed memory management for gdt/tss page
 */
#include <linux/config.h>

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/vs_memory.h>

#include <asm/param.h>
#include <asm/signal.h>

#include "ia32priv.h"
#include "elfcore32.h"

/* Override some function names */
#undef start_thread
#define start_thread			ia32_start_thread
#define elf_format			elf32_format
#define init_elf_binfmt			init_elf32_binfmt
#define exit_elf_binfmt			exit_elf32_binfmt

#undef CLOCKS_PER_SEC
#define CLOCKS_PER_SEC	IA32_CLOCKS_PER_SEC

extern void ia64_elf32_init (struct pt_regs *regs);

static void elf32_set_personality (void);

#define setup_arg_pages(bprm,exec)	ia32_setup_arg_pages(bprm,exec)
#define elf_map				elf32_map

#undef SET_PERSONALITY
#define SET_PERSONALITY(ex, ibcs2)	elf32_set_personality()

#define elf_read_implies_exec(ex, have_pt_gnu_stack)	(!(have_pt_gnu_stack))

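/*
 * With the overrides above in place, textually including the generic ELF
 * loader below instantiates a second binary-format handler (elf32_format)
 * whose start_thread, setup_arg_pages, elf_map and SET_PERSONALITY hooks
 * resolve to the IA-32 variants defined in this file.
 */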
/* Ugly but avoids duplication */
#include "../../../fs/binfmt_elf.c"

extern struct page *ia32_shared_page[];
extern unsigned long *ia32_gdt;

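/*
 * nopage handler for the per-CPU shared page backing the IA-32 GDT mapping
 * (the gdt/tss page): a fault on that mapping returns the current CPU's
 * shared page with its reference count raised, reported as a minor fault.
 */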
struct page *
ia32_install_shared_page (struct vm_area_struct *vma, unsigned long address, int *type)
{
	struct page *pg = ia32_shared_page[smp_processor_id()];
	get_page(pg);
	if (type)
		*type = VM_FAULT_MINOR;
	return pg;
}

static struct vm_operations_struct ia32_shared_page_vm_ops = {
	.nopage = ia32_install_shared_page
};

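/*
 * Per-process IA-32 setup, run when an IA-32 ELF image is started
 * (presumably via the platform init hook of the included loader): maps the
 * GDT and an anonymous LDT area into the new address space and loads the
 * pt_regs with the initial IA-32 register, eflags and segment state.
 */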
void
ia64_elf32_init (struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	/*
	 * Map GDT below 4GB, where the processor can find it.  We need to map
	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
	 * tables.  IA-32 segmentation is used to protect against IA-32 accesses to them.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
		memset(vma, 0, sizeof(*vma));
		vma->vm_mm = current->mm;
		vma->vm_start = IA32_GDT_OFFSET;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = PAGE_SHARED;
		vma->vm_flags = VM_READ|VM_MAYREAD|VM_RESERVED;
		vma->vm_ops = &ia32_shared_page_vm_ops;
		down_write(&current->mm->mmap_sem);
		{
			insert_vm_struct(current->mm, vma);
		}
		up_write(&current->mm->mmap_sem);
	}

	/*
	 * Install LDT as anonymous memory.  This gives us all-zero segment descriptors
	 * until a task modifies them via modify_ldt().
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
		memset(vma, 0, sizeof(*vma));
		vma->vm_mm = current->mm;
		vma->vm_start = IA32_LDT_OFFSET;
		vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
		vma->vm_page_prot = PAGE_SHARED;
		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE;
		down_write(&current->mm->mmap_sem);
		{
			insert_vm_struct(current->mm, vma);
		}
		up_write(&current->mm->mmap_sem);
	}

	ia64_psr(regs)->ac = 0;		/* turn off alignment checking */
	regs->loadrs = 0;
	/*
	 *  According to the ABI %edx points to an `atexit' handler.  Since we don't have
	 *  one we'll set it to 0 and initialize all the other registers just to make
	 *  things more deterministic, ala the i386 implementation.
	 */
	regs->r8 = 0;	/* %eax */
	regs->r11 = 0;	/* %ebx */
	regs->r9 = 0;	/* %ecx */
	regs->r10 = 0;	/* %edx */
	regs->r13 = 0;	/* %ebp */
	regs->r14 = 0;	/* %esi */
	regs->r15 = 0;	/* %edi */

	current->thread.eflag = IA32_EFLAG;
	current->thread.fsr = IA32_FSR_DEFAULT;
	current->thread.fcr = IA32_FCR_DEFAULT;
	current->thread.fir = 0;
	current->thread.fdr = 0;

	/*
	 * Setup GDTD.  Note: GDTD is the descrambled version of the pseudo-descriptor
	 * format defined by Figure 3-11 "Pseudo-Descriptor Format" in the IA-32
	 * architecture manual. Also note that the only fields that are not ignored are
	 * `base', `limit', 'G', `P' (must be 1) and `S' (must be 0).
	 */
	regs->r31 = IA32_SEG_UNSCRAMBLE(IA32_SEG_DESCRIPTOR(IA32_GDT_OFFSET, IA32_PAGE_SIZE - 1,
							    0, 0, 0, 1, 0, 0, 0));
	/* Setup the segment selectors */
	regs->r16 = (__USER_DS << 16) | __USER_DS; /* ES == DS, GS, FS are zero */
	regs->r17 = (__USER_DS << 16) | __USER_CS; /* SS, CS; ia32_load_state() sets TSS and LDT */

	ia32_load_segment_descriptors(current);
	ia32_load_state(current);
}

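/*
 * IA-32 replacement for setup_arg_pages(): builds the stack VMA just below
 * IA32_STACK_TOP, honours the PT_GNU_STACK executable-stack request, and
 * installs the argument pages collected in bprm->page[].  The VServer
 * helpers (vx_vmpages_avail/vx_vmpages_sub) check the context's memory
 * limit and keep its page accounting in step with mm->total_vm.
 */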
int
ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
{
	unsigned long stack_base, grow;
	struct vm_area_struct *mpnt;
	struct mm_struct *mm = current->mm;
	int i;

	stack_base = IA32_STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
	mm->arg_start = bprm->p + stack_base;

	bprm->p += stack_base;
	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	grow = (IA32_STACK_TOP - (PAGE_MASK & (unsigned long) bprm->p))
		>> PAGE_SHIFT;
	if (security_vm_enough_memory(grow) ||
		!vx_vmpages_avail(mm, grow)) {
		kmem_cache_free(vm_area_cachep, mpnt);
		return -ENOMEM;
	}

	memset(mpnt, 0, sizeof(*mpnt));

	down_write(&current->mm->mmap_sem);
	{
		mpnt->vm_mm = current->mm;
		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
		mpnt->vm_end = IA32_STACK_TOP;
		if (executable_stack == EXSTACK_ENABLE_X)
			mpnt->vm_flags = VM_STACK_FLAGS | VM_EXEC;
		else if (executable_stack == EXSTACK_DISABLE_X)
			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
		else
			mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC)?
					PAGE_COPY_EXEC: PAGE_COPY;
		insert_vm_struct(current->mm, mpnt);
		// current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
		vx_vmpages_sub(current->mm, current->mm->total_vm -
			((mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT));
	}

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			install_arg_page(mpnt, page, stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&current->mm->mmap_sem);

	/* Can't do it in ia64_elf32_init(). Needs to be done before calls to
	   elf32_map() */
	current->thread.ppl = ia32_init_pp_list();

	return 0;
}

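/*
 * Personality setup for IA-32 tasks: selects PER_LINUX32 and shrinks the
 * mmap base and task size to the 32-bit values used by Linux/x86.
 */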
static void
elf32_set_personality (void)
{
	set_personality(PER_LINUX32);
	current->thread.map_base  = IA32_PAGE_OFFSET/3;
	current->thread.task_size = IA32_PAGE_OFFSET;	/* use what Linux/x86 uses... */
	set_fs(USER_DS);				/* set addr limit for new TASK_SIZE */
}

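/*
 * elf_map() replacement used by the included loader: maps a program header
 * through ia32_do_mmap() so that offsets and addresses are aligned to the
 * IA-32 page size (IA32_PAGE_MASK) rather than the native IA-64 page size.
 */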
static unsigned long
elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)
{
	unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;

	return ia32_do_mmap(filep, (addr & IA32_PAGE_MASK), eppnt->p_filesz + pgoff, prot, type,
			    eppnt->p_offset - pgoff);
}

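/*
 * CPU families above 0x1f are expected to run IA-32 code through the
 * IA-32 Execution Layer (IA-32 EL) instead of this in-kernel support, so
 * the elf32 handler is unregistered again on such processors.
 */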
#define cpu_uses_ia32el()	(local_cpu_data->family > 0x1f)

static int __init check_elf32_binfmt(void)
{
	if (cpu_uses_ia32el()) {
		printk("Please use IA-32 EL for executing IA-32 binaries\n");
		return unregister_binfmt(&elf_format);
	}
	return 0;
}

module_init(check_elf32_binfmt)