/*
- * linux/kernel/ldt.c
+ * linux/arch/i386/kernel/ldt.c
*
* Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
- * Copyright (C) 1999, 2003 Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*/
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/ldt.h>
#include <asm/desc.h>
-#include <linux/highmem.h>
-#include <asm/atomic_kmap.h>
+#include <asm/mmu_context.h>
#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
static void flush_ldt(void *null)
/*
 * alloc_ldt - grow @pc's LDT so it can hold at least @mincount entries.
 *
 * Normalized from the collapsed patch hunk: this is the code the patch
 * produces (the '+' side).  The LDT lives in one kernel buffer —
 * kmalloc()ed when a single page suffices, vmalloc()ed otherwise.
 *
 * @pc:       mm context whose LDT is (re)allocated
 * @mincount: minimum number of LDT entries required
 * @reload:   non-zero to load the new LDT on the current CPU (and IPI
 *            any other CPUs this mm is live on via flush_ldt)
 *
 * Returns 0 on success, -ENOMEM if the new buffer cannot be allocated.
 * NOTE(review): presumably called with mm->context.sem held — confirm
 * against the callers in the full file.
 */
static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
{
	void *oldldt;
	void *newldt;
	int oldsize;

	if (mincount <= pc->size)
		return 0;
	oldsize = pc->size;
	/* grow in steps of 512 entries to limit reallocation churn */
	mincount = (mincount+511)&(~511);
	if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
		newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
	else
		newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);

	if (!newldt)
		return -ENOMEM;

	if (oldsize)
		memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
	oldldt = pc->ldt;
	memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
	/*
	 * Publish the new table before the new size: a concurrent reader
	 * that observes the larger size must also observe the new pointer.
	 * The wmb()s order the two stores.
	 */
	pc->ldt = newldt;
	wmb();
	pc->size = mincount;
	wmb();

	if (reload) {
#ifdef CONFIG_SMP
		cpumask_t mask;
		preempt_disable();
		load_LDT(pc);
		mask = cpumask_of_cpu(smp_processor_id());
		/* other CPUs running this mm must reload their LDT too */
		if (!cpus_equal(current->mm->cpu_vm_mask, mask))
			smp_call_function(flush_ldt, NULL, 1, 1);
		preempt_enable();
#else
		load_LDT(pc);
#endif
	}
	/* old table is unreferenced once every CPU has reloaded; free it
	 * with the allocator that matches its size, mirroring the rule above */
	if (oldsize) {
		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
			vfree(oldldt);
		else
			kfree(oldldt);
	}
	return 0;
}
/*
 * copy_ldt - duplicate @old's LDT into @new at fork time.
 *
 * Normalized from the collapsed patch hunk ('+' side).  Allocates a
 * buffer big enough for old->size entries with reload == 0 — nobody is
 * running the child yet, so no LDT reload or cross-CPU flush is needed —
 * then copies the descriptors over.
 *
 * Returns 0 on success or the negative error from alloc_ldt().
 * NOTE(review): the visible caller only invokes this when
 * old_mm->context.size > 0, so old->ldt is a live buffer here — confirm.
 */
static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
{
	int err = alloc_ldt(new, old->size, 0);
	if (err < 0)
		return err;
	memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
	return 0;
}
/*
 * NOTE(review): collapsed patch fragment — the enclosing function header
 * is elided.  Lines below up to the down() call are presumably the
 * interior of init_new_context() (fork-time mm_context_t setup); confirm
 * against the full file.  The '-' line drops the per-page LDT_pages
 * bookkeeping that the rest of this patch removes.
 */
init_MUTEX(&mm->context.sem);
mm->context.size = 0;
- memset(mm->context.ldt_pages, 0, sizeof(struct page *) * MAX_LDT_PAGES);
old_mm = current->mm;
if (old_mm && old_mm->context.size > 0) {
down(&old_mm->context.sem);
/*
 * No need to lock the MM as we are the last user
- * Do not touch the ldt register, we are already
- * in the next thread.
 */
void destroy_context(struct mm_struct *mm)
{
- int i, nr_pages = (mm->context.size*LDT_ENTRY_SIZE + PAGE_SIZE-1) / PAGE_SIZE;
-
- for (i = 0; i < nr_pages; i++)
- __free_page(mm->context.ldt_pages[i]);
- mm->context.size = 0;
+ if (mm->context.size) {
+ if (mm == current->active_mm)
+ clear_LDT();
+ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(mm->context.ldt);
+ else
+ kfree(mm->context.ldt);
+ mm->context.size = 0;
+ }
}
/*
 * read_ldt - copy the current mm's LDT out to userspace.
 *
 * NOTE(review): collapsed patch fragment.  The up(&mm->context.sem)
 * below pairs with a down() in elided context lines; the clamp of
 * `size` against mm->context.size presumably also sits in the elided
 * hunk between `err = 0;` and the copy — confirm against the full file.
 *
 * The '-' side was the highmem implementation: a global TLB flush plus
 * a per-page kmap/copy_to_user/kunmap loop.  The '+' side copies
 * straight from the permanently-mapped kernel LDT buffer.
 */
static int read_ldt(void __user * ptr, unsigned long bytecount)
{
- int err, i;
+ int err;
unsigned long size;
struct mm_struct * mm = current->mm;
size = bytecount;
err = 0;
- /*
- * This is necessary just in case we got here straight from a
- * context-switch where the ptes were set but no tlb flush
- * was done yet. We rather avoid doing a TLB flush in the
- * context-switch path and do it here instead.
- */
- __flush_tlb_global();
-
- for (i = 0; i < size; i += PAGE_SIZE) {
- int nr = i / PAGE_SIZE, bytes;
- char *kaddr = kmap(mm->context.ldt_pages[nr]);
-
- bytes = size - i;
- if (bytes > PAGE_SIZE)
- bytes = PAGE_SIZE;
/* NOTE(review): the removed line below passed `size - i` instead of the
 * clamped `bytes` — it could read past the mapped page; the rewrite
 * also removes that latent bug. */
- if (copy_to_user(ptr + i, kaddr, size - i))
- err = -EFAULT;
- kunmap(mm->context.ldt_pages[nr]);
- }
+ if (copy_to_user(ptr, mm->context.ldt, size))
+ err = -EFAULT;
up(&mm->context.sem);
if (err < 0)
- return err;
+ goto error_return;
if (size != bytecount) {
/* zero-fill the rest */
/* '+' side now checks clear_user's result instead of ignoring it */
- clear_user(ptr+size, bytecount-size);
+ if (clear_user(ptr+size, bytecount-size) != 0) {
+ err = -EFAULT;
+ goto error_return;
+ }
}
return bytecount;
+error_return:
+ return err;
}
/*
 * read_default_ldt - "read" the kernel's default LDT for userspace.
 *
 * Normalized from the collapsed patch hunk ('+' side), with the closing
 * brace — an elided context line — restored.  The patch replaces the
 * removed copy_to_user() of &default_ldt[0] with clear_user(), i.e. the
 * caller receives zeroed descriptors (presumably because the default
 * LDT's content is not meaningful to userspace — confirm against the
 * upstream changelog).  The write is clamped to min(5 descriptors,
 * @bytecount).
 *
 * Returns the number of bytes zero-filled, or -EFAULT if the user
 * buffer is not writable.  The original's initial dead store
 * `err = 0;` is dropped: err is unconditionally overwritten with size
 * before any read.
 */
static int read_default_ldt(void __user * ptr, unsigned long bytecount)
{
	int err;
	unsigned long size;

	size = 5*sizeof(struct desc_struct);
	if (size > bytecount)
		size = bytecount;

	err = size;
	if (clear_user(ptr, size))
		err = -EFAULT;

	return err;
}
/*
 * write_ldt - install one descriptor into the current mm's LDT.
 *
 * NOTE(review): heavily collapsed patch fragment.  Elided hunks
 * presumably include: copy_from_user() of ldt_info, validation of
 * ldt_info.entry_number, growing the table via alloc_ldt(), and the
 * LDT_entry_a()/LDT_entry_b() computation of entry_1/entry_2 — confirm
 * against the full file.  The final `return ret; }` lines are the tail
 * of the enclosing syscall (presumably sys_modify_ldt) whose body is
 * also elided.
 *
 * The '-' side wrote the descriptor words directly through a fixmap
 * kmap_atomic address (with preempt disabled and a global TLB flush);
 * the '+' side installs via write_ldt_entry() into the kernel-mapped
 * mm->context.ldt buffer, so the preempt/TLB dance goes away.
 */
static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
{
struct mm_struct * mm = current->mm;
- __u32 entry_1, entry_2, *lp;
+ __u32 entry_1, entry_2;
int error;
struct user_desc ldt_info;
goto out_unlock;
}
- /*
- * No rescheduling allowed from this point to the install.
- *
- * We do a TLB flush for the same reason as in the read_ldt() path.
- */
- preempt_disable();
- __flush_tlb_global();
- lp = (__u32 *) ((ldt_info.entry_number << 3) +
- (char *) __kmap_atomic_vaddr(KM_LDT_PAGE0));
-
/* Allow LDTs to be cleared by the user. */
if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
if (oldmode || LDT_empty(&ldt_info)) {
/* Install the new entry ... */
install:
- *lp = entry_1;
- *(lp+1) = entry_2;
+ write_ldt_entry(mm->context.ldt, ldt_info.entry_number, entry_1, entry_2);
error = 0;
- preempt_enable();
out_unlock:
up(&mm->context.sem);
}
return ret;
}
/*
 * NOTE(review): pure deletion hunk — load_LDT_nolock() is removed
 * outright by this patch.  It existed to (re)map the highmem LDT pages
 * into per-CPU fixmap slots (KM_LDT_PAGE0 downwards) and point the LDT
 * descriptor at the mapped range, falling back to the 5-entry
 * default_ldt_page when the mm had no LDT.  With the kmalloc/vmalloc
 * buffer introduced elsewhere in this patch the LDT is permanently
 * mapped, so presumably load_LDT() now uses the buffer's kernel virtual
 * address directly — confirm against the matching asm/desc.h change.
 */
-
-/*
- * load one particular LDT into the current CPU
- */
-void load_LDT_nolock(mm_context_t *pc, int cpu)
-{
- struct page **pages = pc->ldt_pages;
- int count = pc->size;
- int nr_pages, i;
-
- if (likely(!count)) {
- pages = &default_ldt_page;
- count = 5;
- }
- nr_pages = (count*LDT_ENTRY_SIZE + PAGE_SIZE-1) / PAGE_SIZE;
-
- for (i = 0; i < nr_pages; i++) {
- __kunmap_atomic_type(KM_LDT_PAGE0 - i);
- __kmap_atomic(pages[i], KM_LDT_PAGE0 - i);
- }
- set_ldt_desc(cpu, (void *)__kmap_atomic_vaddr(KM_LDT_PAGE0), count);
- load_LDT_desc();
-}