/*
 * linux/arch/x86_64/kernel/ldt.c
 *
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
 *
 * This handles calls from both 32bit and 64bit mode.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/pgalloc.h>
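/*
 * Xen note (summary of the code below): descriptor-table pages must be
 * mapped read-only before they can be loaded, unless the hypervisor
 * advertises XENFEAT_writable_descriptor_tables, and individual LDT
 * entries are installed via HYPERVISOR_update_descriptor() rather than
 * by writing the table directly.
 */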
#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
static void flush_ldt(void *null)
{
	if (current->active_mm)
		load_LDT(&current->active_mm->context);
}
#endif
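/*
 * Grow (or initially allocate) the LDT of 'pc' so it holds at least
 * 'mincount' entries.  Allocation is rounded up to 512-entry chunks;
 * tables larger than a page come from vmalloc(), smaller ones from
 * kmalloc().  With 'reload' set the new table is also loaded on this
 * CPU and, if other CPUs use this mm, flushed onto them as well.
 */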
static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
{
	void *oldldt, *newldt;
	unsigned oldsize;

	if (mincount <= (unsigned)pc->size)
		return 0;
	oldsize = pc->size;
	mincount = (mincount+511)&(~511);
	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
	else
		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
	if (!newldt)
		return -ENOMEM;

	if (oldsize)
		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
	oldldt = pc->ldt;
	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
	wmb();
	pc->ldt = newldt;
	wmb();
	pc->size = mincount;
	wmb();
	if (reload) {
#ifdef CONFIG_SMP
		cpumask_t mask;

		preempt_disable();
#endif
		make_pages_readonly(pc->ldt,
				    (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
				    XENFEAT_writable_descriptor_tables);
		load_LDT(pc);
#ifdef CONFIG_SMP
		mask = cpumask_of_cpu(smp_processor_id());
		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
			smp_call_function(flush_ldt, NULL, 1, 1);
		preempt_enable();
#endif
	}
	if (oldsize) {
		make_pages_writable(oldldt,
				    (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
				    XENFEAT_writable_descriptor_tables);
		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(oldldt);
		else
			kfree(oldldt);
	}
	return 0;
}
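/* Duplicate the parent's LDT into a new mm context (used at fork time). */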
static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
{
	int err = alloc_ldt(new, old->size, 0);
	if (err < 0)
		return err;
	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
	make_pages_readonly(new->ldt,
			    (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
			    XENFEAT_writable_descriptor_tables);
	return 0;
}
/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	struct mm_struct * old_mm;
	int retval = 0;

	memset(&mm->context, 0, sizeof(mm->context));
	init_MUTEX(&mm->context.sem);
	old_mm = current->mm;
	if (old_mm && old_mm->context.size > 0) {
		down(&old_mm->context.sem);
		retval = copy_ldt(&mm->context, &old_mm->context);
		up(&old_mm->context.sem);
	}
	if (retval == 0) {
		spin_lock(&mm_unpinned_lock);
		list_add(&mm->context.unpinned, &mm_unpinned);
		spin_unlock(&mm_unpinned_lock);
	}
	return retval;
}
/*
 * Don't touch the LDT register - we're already in the next thread.
 */
void destroy_context(struct mm_struct *mm)
{
	if (mm->context.size) {
		if (mm == current->active_mm)
			clear_LDT();
		make_pages_writable(mm->context.ldt,
				    (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
				    XENFEAT_writable_descriptor_tables);
		if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(mm->context.ldt);
		else
			kfree(mm->context.ldt);
		mm->context.size = 0;
	}
	if (!mm->context.pinned) {
		spin_lock(&mm_unpinned_lock);
		list_del(&mm->context.unpinned);
		spin_unlock(&mm_unpinned_lock);
	}
}
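/*
 * modify_ldt read operation: copy up to 'bytecount' bytes of the
 * current LDT to user space; anything beyond the allocated entries is
 * zero-filled.
 */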
static int read_ldt(void __user * ptr, unsigned long bytecount)
{
	int err;
	unsigned long size;
	struct mm_struct * mm = current->mm;

	if (!mm->context.size)
		return 0;
	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;

	down(&mm->context.sem);
	size = mm->context.size*LDT_ENTRY_SIZE;
	if (size > bytecount)
		size = bytecount;

	err = 0;
	if (copy_to_user(ptr, mm->context.ldt, size))
		err = -EFAULT;
	up(&mm->context.sem);
	if (err < 0)
		goto error_return;
	if (size != bytecount) {
		/* zero-fill the rest */
		if (clear_user(ptr+size, bytecount-size) != 0) {
			err = -EFAULT;
			goto error_return;
		}
	}
	return bytecount;
error_return:
	return err;
}
static int read_default_ldt(void __user * ptr, unsigned long bytecount)
{
	/* Arbitrary number */
	/* x86-64 default LDT is all zeros */
	if (bytecount > 128)
		bytecount = 128;
	if (clear_user(ptr, bytecount))
		return -EFAULT;
	return bytecount;
}
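/*
 * Install one LDT entry described by a struct user_desc from user
 * space.  Because the LDT pages are read-only to the guest, the entry
 * is written via HYPERVISOR_update_descriptor() on the machine address
 * of the slot instead of a plain memory store.
 */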
static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
{
	struct task_struct *me = current;
	struct mm_struct * mm = me->mm;
	__u32 entry_1, entry_2, *lp;
	unsigned long mach_lp;
	int error;
	struct user_desc ldt_info;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, bytecount))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	down(&mm->context.sem);
	if (ldt_info.entry_number >= (unsigned)mm->context.size) {
		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
		if (error < 0)
			goto out_unlock;
	}

	lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
	mach_lp = arbitrary_virt_to_machine(lp);

	/* Allow LDTs to be cleared by the user. */
	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
		if (oldmode || LDT_empty(&ldt_info)) {
			entry_1 = 0;
			entry_2 = 0;
			goto install;
		}
	}

	entry_1 = LDT_entry_a(&ldt_info);
	entry_2 = LDT_entry_b(&ldt_info);
	if (oldmode)
		entry_2 &= ~(1 << 20);

	/* Install the new entry ... */
install:
	error = HYPERVISOR_update_descriptor(mach_lp,
			(unsigned long)(entry_1 | (unsigned long) entry_2 << 32));

out_unlock:
	up(&mm->context.sem);
out:
	return error;
}
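/*
 * modify_ldt(2) entry point.  'func' selects the operation: 0 = read_ldt,
 * 1 = write_ldt (old format), 2 = read_default_ldt, 0x11 = write_ldt
 * (struct user_desc format).
 *
 * Illustrative user-space call (not part of this file):
 *
 *	struct user_desc d = { .entry_number = 0, ... };
 *	syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));
 */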
asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	return ret;
}