/*
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999, 2003 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <linux/highmem.h>
#include <asm/atomic_kmap.h>
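
/*
 * Note: the LDT lives in (possibly highmem) pages that are mapped into
 * a fixed per-CPU window of atomic-kmap slots (KM_LDT_PAGE0 and down,
 * from <asm/atomic_kmap.h>) by load_LDT_nolock() below.
 */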

#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
static void flush_ldt(void *null)
{
	if (current->active_mm)
		load_LDT(&current->active_mm->context);
}
#endif
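
/*
 * Grow the LDT of 'pc' to at least 'mincount' entries, rounded up to a
 * multiple of 512, allocating and zeroing any pages still missing. If
 * 'reload' is set, the result is loaded on this CPU; on SMP, if other
 * CPUs may be running this mm, flush_ldt() is broadcast so they reload
 * theirs too.
 */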
static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
{
	int oldsize, newsize, i;

	if (mincount <= pc->size)
		return 0;
	/*
	 * LDT got larger - reallocate if necessary.
	 */
	oldsize = pc->size;
	mincount = (mincount+511)&(~511);
	newsize = mincount*LDT_ENTRY_SIZE;
	for (i = 0; i < newsize; i += PAGE_SIZE) {
		int nr = i/PAGE_SIZE;
		BUG_ON(i >= 64*1024);
		if (!pc->ldt_pages[nr]) {
			pc->ldt_pages[nr] = alloc_page(GFP_HIGHUSER);
			if (!pc->ldt_pages[nr])
				return -ENOMEM;
			clear_highpage(pc->ldt_pages[nr]);
		}
	}
	pc->size = mincount;
	if (reload) {
#ifdef CONFIG_SMP
		cpumask_t mask;

		preempt_disable();
		load_LDT(pc);
		mask = cpumask_of_cpu(smp_processor_id());
		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
			smp_call_function(flush_ldt, NULL, 1, 1);
		preempt_enable();
#else
		load_LDT(pc);
#endif
	}
	return 0;
}
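
/*
 * Called via init_new_context() at fork() time: give the new mm its
 * own copy of the parent's LDT pages.
 */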
static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
{
	int i, err, size = old->size, nr_pages = (size*LDT_ENTRY_SIZE + PAGE_SIZE-1)/PAGE_SIZE;

	err = alloc_ldt(new, size, 0);
	if (err < 0)
		return err;

	for (i = 0; i < nr_pages; i++)
		copy_user_highpage(new->ldt_pages[i], old->ldt_pages[i], 0);
	return 0;
}

/*
 * we do not have to muck with descriptors here, that is
 * done in switch_mm() as needed.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	struct mm_struct * old_mm;
	int retval = 0;

	init_MUTEX(&mm->context.sem);
	mm->context.size = 0;
	memset(mm->context.ldt_pages, 0, sizeof(struct page *) * MAX_LDT_PAGES);
	old_mm = current->mm;
	if (old_mm && old_mm->context.size > 0) {
		down(&old_mm->context.sem);
		retval = copy_ldt(&mm->context, &old_mm->context);
		up(&old_mm->context.sem);
	}
	return retval;
}

/*
 * No need to lock the MM as we are the last user
 * Do not touch the ldt register, we are already
 * in the next thread.
 */
void destroy_context(struct mm_struct *mm)
{
	int i, nr_pages = (mm->context.size*LDT_ENTRY_SIZE + PAGE_SIZE-1) / PAGE_SIZE;

	for (i = 0; i < nr_pages; i++)
		__free_page(mm->context.ldt_pages[i]);
	mm->context.size = 0;
}
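
/*
 * Read the current mm's LDT into a user buffer, one kmap()ed page at a
 * time. Returns the number of bytes copied; the tail of the buffer
 * beyond the allocated LDT is zero-filled up to bytecount.
 */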
static int read_ldt(void __user * ptr, unsigned long bytecount)
{
	int err, i;
	unsigned long size;
	struct mm_struct * mm = current->mm;

	if (!mm->context.size)
		return 0;
	if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
		bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;

	down(&mm->context.sem);
	size = mm->context.size*LDT_ENTRY_SIZE;
	if (size > bytecount)
		size = bytecount;

	err = 0;
	/*
	 * This is necessary just in case we got here straight from a
	 * context-switch where the ptes were set but no tlb flush
	 * was done yet. We rather avoid doing a TLB flush in the
	 * context-switch path and do it here instead.
	 */
	__flush_tlb_global();

	for (i = 0; i < size; i += PAGE_SIZE) {
		int nr = i / PAGE_SIZE, bytes;
		char *kaddr = kmap(mm->context.ldt_pages[nr]);

		bytes = size - i;
		if (bytes > PAGE_SIZE)
			bytes = PAGE_SIZE;
		if (copy_to_user(ptr + i, kaddr, bytes))
			err = -EFAULT;
		kunmap(mm->context.ldt_pages[nr]);
	}
	up(&mm->context.sem);
	if (err < 0)
		return err;
	if (size != bytecount) {
		/* zero-fill the rest */
		clear_user(ptr+size, bytecount-size);
	}
	return bytecount;
}
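
/*
 * Read back the default LDT (5 entries) that a new mm sees before any
 * entries of its own have been installed.
 */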
static int read_default_ldt(void __user * ptr, unsigned long bytecount)
{
	int err;
	unsigned long size;
	void *address;

	address = &default_ldt[0];
	size = 5*LDT_ENTRY_SIZE;
	if (size > bytecount)
		size = bytecount;

	err = size;
	if (copy_to_user(ptr, address, size))
		err = -EFAULT;

	return err;
}
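
/*
 * Install one LDT entry (or clear it, when base and limit are both 0).
 * 'oldmode' selects the legacy modify_ldt() semantics, in which the
 * 'useable' bit (bit 20 of the descriptor's high word) is forced off.
 */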
static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
{
	struct mm_struct * mm = current->mm;
	__u32 entry_1, entry_2, *lp;
	int error;
	struct user_desc ldt_info;

	error = -EINVAL;
	if (bytecount != sizeof(ldt_info))
		goto out;
	error = -EFAULT;
	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
		goto out;

	error = -EINVAL;
	if (ldt_info.entry_number >= LDT_ENTRIES)
		goto out;
	if (ldt_info.contents == 3) {
		if (oldmode)
			goto out;
		if (ldt_info.seg_not_present == 0)
			goto out;
	}

	down(&mm->context.sem);
	if (ldt_info.entry_number >= mm->context.size) {
		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
		if (error < 0)
			goto out_unlock;
	}

	/*
	 * No rescheduling allowed from this point to the install.
	 *
	 * We do a TLB flush for the same reason as in the read_ldt() path.
	 */
	preempt_disable();
	__flush_tlb_global();
	lp = (__u32 *) ((ldt_info.entry_number << 3) +
			(char *) __kmap_atomic_vaddr(KM_LDT_PAGE0));

	/* Allow LDTs to be cleared by the user. */
	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
		if (oldmode || LDT_empty(&ldt_info)) {
			entry_1 = 0;
			entry_2 = 0;
			goto install;
		}
	}

	entry_1 = LDT_entry_a(&ldt_info);
	entry_2 = LDT_entry_b(&ldt_info);
	if (oldmode)
		entry_2 &= ~(1 << 20);

	/* Install the new entry ... */
install:
	*lp	= entry_1;
	*(lp+1)	= entry_2;
	preempt_enable();
	error = 0;

out_unlock:
	up(&mm->context.sem);
out:
	return error;
}
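
/*
 * The modify_ldt(2) syscall multiplexer: func 0 reads the LDT, 1
 * writes an entry with the legacy semantics, 2 reads the default LDT,
 * and 0x11 writes an entry taking a full struct user_desc.
 *
 * A minimal userspace sketch (hypothetical values; the raw syscall is
 * typically used, as glibc ships no wrapper):
 *
 *	struct user_desc d = { .entry_number = 0, .base_addr = base,
 *			       .limit = 0xfff, .seg_32bit = 1 };
 *	syscall(__NR_modify_ldt, 0x11, &d, sizeof(d));
 */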
asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
{
	int ret = -ENOSYS;

	switch (func) {
	case 0:
		ret = read_ldt(ptr, bytecount);
		break;
	case 1:
		ret = write_ldt(ptr, bytecount, 1);
		break;
	case 2:
		ret = read_default_ldt(ptr, bytecount);
		break;
	case 0x11:
		ret = write_ldt(ptr, bytecount, 0);
		break;
	}
	return ret;
}

/*
 * load one particular LDT into the current CPU
 */
void load_LDT_nolock(mm_context_t *pc, int cpu)
{
	struct page **pages = pc->ldt_pages;
	int count = pc->size;
	int nr_pages, i;

	if (likely(!count)) {
		pages = &default_ldt_page;
		count = 5;
	}
	nr_pages = (count*LDT_ENTRY_SIZE + PAGE_SIZE-1) / PAGE_SIZE;

	for (i = 0; i < nr_pages; i++) {
		__kunmap_atomic_type(KM_LDT_PAGE0 - i);
		__kmap_atomic(pages[i], KM_LDT_PAGE0 - i);
	}
	set_ldt_desc(cpu, (void *)__kmap_atomic_vaddr(KM_LDT_PAGE0), count);
	load_LDT_desc();
}