* linux/kernel/ldt.c
*
* Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 1999, 2003 Ingo Molnar <mingo@redhat.com>
*/
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/desc.h>
+#include <linux/highmem.h>
+#include <asm/atomic_kmap.h>
#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
static void flush_ldt(void *null)
static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
{
- void *oldldt;
- void *newldt;
- int oldsize;
+ int oldsize, newsize, i;
if (mincount <= pc->size)
return 0;
+ /*
+ * LDT got larger - reallocate if necessary.
+ */
oldsize = pc->size;
mincount = (mincount+511)&(~511);
- if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
- newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
- else
- newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
-
- if (!newldt)
- return -ENOMEM;
-
- if (oldsize)
- memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
- oldldt = pc->ldt;
- memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
- pc->ldt = newldt;
- wmb();
+ /*
+ * The LDT is now backed by individual (possibly highmem) pages
+ * instead of one kmalloc/vmalloc block: allocate and zero only the
+ * pages not already present, so pages from a previous (smaller)
+ * allocation are reused in place and no copy is needed.
+ */
+ newsize = mincount*LDT_ENTRY_SIZE;
+ for (i = 0; i < newsize; i += PAGE_SIZE) {
+ int nr = i/PAGE_SIZE;
+ /* an LDT never exceeds 64KB here -- ldt_pages[] (MAX_LDT_PAGES) is sized for that */
+ BUG_ON(i >= 64*1024);
+ if (!pc->ldt_pages[nr]) {
+ pc->ldt_pages[nr] = alloc_page(GFP_HIGHUSER);
+ if (!pc->ldt_pages[nr])
+ return -ENOMEM;
+ /* zero-fill the fresh page before it can ever be loaded as an LDT */
+ clear_highpage(pc->ldt_pages[nr]);
+ }
+ }
pc->size = mincount;
- wmb();
-
if (reload) {
#ifdef CONFIG_SMP
cpumask_t mask;
+
preempt_disable();
load_LDT(pc);
mask = cpumask_of_cpu(smp_processor_id());
+ /*
+ * NOTE(review): preempt_disable() has no matching preempt_enable()
+ * visible in this hunk, and load_LDT() appears twice with 'mask'
+ * computed but unused -- presumably the cross-CPU flush_ldt IPI and
+ * the #else (UP) branch were elided from this excerpt; confirm
+ * against the full patch before applying.
+ */
load_LDT(pc);
#endif
}
- if (oldsize) {
- if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(oldldt);
- else
- kfree(oldldt);
- }
return 0;
}
+/*
+ * Duplicate an mm's LDT into a fresh context (used when a new mm
+ * inherits the old mm's LDT): make sure the new context has pages for
+ * 'size' entries, then copy the parent's LDT contents page by page.
+ * Returns 0 or -ENOMEM; on failure new->size is defensively forced to
+ * 0 so a later destroy_context() on the half-built context computes
+ * zero pages to free.
+ */
static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
{
- int err = alloc_ldt(new, old->size, 0);
- if (err < 0)
+ int i, err, size = old->size, nr_pages = (size*LDT_ENTRY_SIZE + PAGE_SIZE-1)/PAGE_SIZE;
+
+ err = alloc_ldt(new, size, 0);
+ if (err < 0) {
+ new->size = 0;
return err;
- memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
+ }
+ /* page-by-page highmem-safe copy replaces the old flat memcpy() */
+ for (i = 0; i < nr_pages; i++)
+ copy_user_highpage(new->ldt_pages[i], old->ldt_pages[i], 0);
return 0;
}
init_MUTEX(&mm->context.sem);
mm->context.size = 0;
+ memset(mm->context.ldt_pages, 0, sizeof(struct page *) * MAX_LDT_PAGES);
old_mm = current->mm;
if (old_mm && old_mm->context.size > 0) {
down(&old_mm->context.sem);
/*
* No need to lock the MM as we are the last user
+ * Do not touch the ldt register, we are already
+ * in the next thread.
*/
+/*
+ * Tear down the per-mm LDT: free every page the LDT occupies (count
+ * derived from context.size, rounded up to whole pages) and reset the
+ * size. The old clear_LDT() call is gone -- per the comment above, the
+ * LDT register is not touched because we are already in the next thread.
+ */
void destroy_context(struct mm_struct *mm)
{
- if (mm->context.size) {
- if (mm == current->active_mm)
- clear_LDT();
- if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(mm->context.ldt);
- else
- kfree(mm->context.ldt);
- mm->context.size = 0;
- }
+ int i, nr_pages = (mm->context.size*LDT_ENTRY_SIZE + PAGE_SIZE-1) / PAGE_SIZE;
+
+ /*
+ * NOTE(review): pages allocated by a partially-failed alloc_ldt()
+ * (which returns -ENOMEM without ever updating pc->size) are not
+ * covered by this size-derived loop -- confirm such pages cannot
+ * be leaked on this path.
+ */
+ for (i = 0; i < nr_pages; i++)
+ __free_page(mm->context.ldt_pages[i]);
+ mm->context.size = 0;
}
static int read_ldt(void __user * ptr, unsigned long bytecount)
{
- int err;
+ int err, i;
unsigned long size;
struct mm_struct * mm = current->mm;
size = bytecount;
err = 0;
- if (copy_to_user(ptr, mm->context.ldt, size))
- err = -EFAULT;
+ /*
+ * This is necessary just in case we got here straight from a
+ * context-switch where the ptes were set but no tlb flush
+ * was done yet. We rather avoid doing a TLB flush in the
+ * context-switch path and do it here instead.
+ */
+ __flush_tlb_global();
+
+ for (i = 0; i < size; i += PAGE_SIZE) {
+ int nr = i / PAGE_SIZE, bytes;
+ char *kaddr = kmap(mm->context.ldt_pages[nr]);
+
+ bytes = size - i;
+ if (bytes > PAGE_SIZE)
+ bytes = PAGE_SIZE;
+ if (copy_to_user(ptr + i, kaddr, bytes))
+ err = -EFAULT;
+ kunmap(mm->context.ldt_pages[nr]);
+ }
up(&mm->context.sem);
if (err < 0)
return err;
err = 0;
address = &default_ldt[0];
- size = 5*sizeof(struct desc_struct);
+ size = 5*LDT_ENTRY_SIZE;
if (size > bytecount)
size = bytecount;
goto out_unlock;
}
- lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
+ /*
+ * No rescheduling allowed from this point to the install.
+ *
+ * We do a TLB flush for the same reason as in the read_ldt() path.
+ */
+ preempt_disable();
+ __flush_tlb_global();
+ lp = (__u32 *) ((ldt_info.entry_number << 3) +
+ (char *) __kmap_atomic_vaddr(KM_LDT_PAGE0));
/* Allow LDTs to be cleared by the user. */
if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
*lp = entry_1;
*(lp+1) = entry_2;
error = 0;
+ preempt_enable();
out_unlock:
up(&mm->context.sem);
}
return ret;
}
+
+/*
+ * load one particular LDT into the current CPU
+ */
+
+/*
+ * load one particular LDT into the current CPU
+ *
+ * "_nolock": no serialization is taken here -- presumably the caller
+ * holds context.sem or has preemption disabled; confirm at call sites.
+ */
+void load_LDT_nolock(mm_context_t *pc, int cpu)
+{
+ struct page **pages = pc->ldt_pages;
+ int count = pc->size;
+ int nr_pages, i;
+
+ /* most mms have no private LDT: fall back to the 5-entry default LDT page */
+ if (likely(!count)) {
+ pages = &default_ldt_page;
+ count = 5;
+ }
+ nr_pages = (count*LDT_ENTRY_SIZE + PAGE_SIZE-1) / PAGE_SIZE;
+
+ /*
+ * Map each LDT page into this CPU's atomic-kmap window, replacing
+ * whatever previous LDT page occupied each slot; the slots run
+ * downward from KM_LDT_PAGE0, hence the '- i'.
+ */
+ for (i = 0; i < nr_pages; i++) {
+ __kunmap_atomic_type(KM_LDT_PAGE0 - i);
+ __kmap_atomic(pages[i], KM_LDT_PAGE0 - i);
+ }
+ /* point this CPU's LDT descriptor at the kmap window and load it */
+ set_ldt_desc(cpu, (void *)__kmap_atomic_vaddr(KM_LDT_PAGE0), count);
+ load_LDT_desc();
+}