5 #include <asm/segment.h>
9 #include <linux/preempt.h>
10 #include <linux/smp.h>
/* Per-CPU GDT storage: one array of GDT_ENTRIES descriptors per possible CPU. */
14 extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
/*
 * Pseudo-descriptor layout consumed by the LGDT/LIDT instructions: a
 * 16-bit limit followed by the 32-bit linear base address.  Packed so
 * the in-memory layout matches what the hardware expects.
 * NOTE(review): the limit/size member (original line 17) is missing from
 * this extract -- confirm against the full header before relying on layout.
 */
16 struct Xgt_desc_struct {
18 unsigned long address __attribute__((packed));
20 } __attribute__ ((packed));

/* IDT pseudo-descriptor plus one GDT pseudo-descriptor per CPU (defined elsewhere). */
22 extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
/* Load the task register with the TSS selector (GDT index GDT_ENTRY_TSS). */
24 #define load_TR_desc() __asm__ __volatile__("ltr %%ax"::"a" (GDT_ENTRY_TSS*8))
/* Load the LDT register with the per-CPU LDT selector (GDT index GDT_ENTRY_LDT). */
25 #define load_LDT_desc() __asm__ __volatile__("lldt %%ax"::"a" (GDT_ENTRY_LDT*8))

/* Resolve a CPU's GDT base pointer from its pseudo-descriptor's linear address. */
27 #define get_cpu_gdt_table(_cpu) ((struct desc_struct *)cpu_gdt_descr[(_cpu)].address)
/*
 * NOTE(review): the comment delimiters around the next two lines
 * (original lines 29 and 32) are missing from this extract.
 */
30 * This is the ldt that every process will get unless we need
31 * something other than this.
33 extern struct desc_struct default_ldt[];
/* Install an interrupt gate for vector 'irq' pointing at handler 'addr'. */
34 extern void set_intr_gate(unsigned int irq, void * addr);
/*
 * Assemble a TSS/LDT system descriptor in place at *(n): scatter the
 * 32-bit base address (in %eax), the limit, and the type byte into the
 * 8-byte descriptor layout using byte/word stores.
 * NOTE(review): several asm lines (original lines 41-42 and 44 --
 * presumably the type-byte, limit-high-nibble and base-mid-byte stores)
 * are missing from this extract; do not modify without the full macro.
 */
36 #define _set_tssldt_desc(n,addr,limit,type) \
37 __asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
38 "movw %%ax,2(%2)\n\t" \
39 "rorl $16,%%eax\n\t" \
40 "movb %%al,4(%2)\n\t" \
43 "movb %%ah,7(%2)\n\t" \
45 : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
/*
 * Point GDT slot 'entry' of the given CPU at the TSS at 'addr'.  The limit
 * covers the tss_struct up to __cacheline_filler; type 0x89 encodes an
 * available 32-bit TSS descriptor.
 * NOTE(review): the function's opening/closing braces (original lines
 * 48 and 51) are missing from this extract.
 */
47 static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
49 _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
50 offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);

/* Convenience wrapper: install the TSS in the standard GDT_ENTRY_TSS slot. */
53 #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
/*
 * Install an LDT descriptor in this CPU's GDT: 'size' is the entry count,
 * so the byte limit is size*8 - 1; type 0x82 encodes an LDT descriptor.
 * NOTE(review): the function braces (original lines 56/59) are missing
 * from this extract.
 */
55 static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
57 _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT],
58 (int)addr, ((size << 3)-1), 0x82);
/* Low dword of an LDT descriptor: base[15:0] in the high word, limit[15:0] low. */
61 #define LDT_entry_a(info) \
62 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))

/*
 * High dword of an LDT descriptor: base[31:24] and base[23:16] in place,
 * limit[19:16], and the access/flag bits assembled from the user_desc
 * fields (read_exec_only and seg_not_present are inverted because the
 * hardware bits have the opposite sense).
 * NOTE(review): the final OR term of this macro (original line 74 --
 * presumably "| 0x7000" setting the present/DPL bits) is missing from
 * this extract; the macro as shown is syntactically incomplete.
 */
64 #define LDT_entry_b(info) \
65 (((info)->base_addr & 0xff000000) | \
66 (((info)->base_addr & 0x00ff0000) >> 16) | \
67 ((info)->limit & 0xf0000) | \
68 (((info)->read_exec_only ^ 1) << 9) | \
69 ((info)->contents << 10) | \
70 (((info)->seg_not_present ^ 1) << 15) | \
71 ((info)->seg_32bit << 22) | \
72 ((info)->limit_in_pages << 23) | \
73 ((info)->useable << 20) | \
/*
 * True when the user_desc describes an empty / not-present segment --
 * the canonical "clear this descriptor slot" request from userspace.
 * Note read_exec_only == 1 and seg_not_present == 1 are the inverted
 * (inactive) values, matching the inversions in LDT_entry_b above.
 */
76 #define LDT_empty(info) (\
77 (info)->base_addr == 0 && \
78 (info)->limit == 0 && \
79 (info)->contents == 0 && \
80 (info)->read_exec_only == 1 && \
81 (info)->seg_32bit == 0 && \
82 (info)->limit_in_pages == 0 && \
83 (info)->seg_not_present == 1 && \
84 (info)->useable == 0 )
/*
 * NOTE(review): the guarding conditional for this #error (original line
 * 86 -- presumably "#if TLS_SIZE != 24") and its matching #endif are
 * missing from this extract.
 */
87 # error update this code.

/*
 * Copy the thread's TLS descriptors into this CPU's GDT.  Under Xen the
 * GDT pages are not directly writable, so each descriptor is updated via
 * the HYPERVISOR_update_descriptor hypercall on its machine address.
 * NOTE(review): the function braces and the loop/invocations of C(i)
 * (original lines 91, 93-95) are missing from this extract.
 */
90 static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
92 #define C(i) HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), ((u32 *)&t->tls_array[i])[0], ((u32 *)&t->tls_array[i])[1])
/*
 * Reset the current CPU's LDT.  Per the original comment below, the
 * default_ldt (for lcall7/27 emulation) is loaded lazily elsewhere, so
 * this path just clears it.
 * NOTE(review): most of the body (original lines 98-101, 104, 106-108)
 * is missing from this extract.
 */
97 static inline void clear_LDT(void)
102 * NB. We load the default_ldt for lcall7/27 handling on demand, as
103 * it slows down context switching. Noone uses it anyway.
105 cpu = cpu; /* XXX avoid compiler warning */
/*
 * Load a specific mm's LDT onto this CPU without taking locks: hand the
 * LDT's address and entry count to Xen via xen_set_ldt.
 * NOTE(review): the function braces and intervening logic (original
 * lines 114, 117-120, 122) are missing from this extract; the enclosing
 * comment's delimiters (around original line 111) are also missing.
 */
111 * load one particular LDT into the current CPU
113 static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
115 void *segments = pc->ldt;
116 int count = pc->size;
121 xen_set_ldt((unsigned long)segments, count);
/*
 * Locked/preemption-safe wrapper around load_LDT_nolock for the current CPU.
 * NOTE(review): the braces and the cpu acquisition (original lines
 * 125-126, 128 -- presumably get_cpu()/put_cpu()) are missing from this
 * extract.
 */
124 static inline void load_LDT(mm_context_t *pc)
127 load_LDT_nolock(pc, cpu);
/*
 * Reassemble the 32-bit linear base address from a raw 8-byte segment
 * descriptor viewed as two longs: base[15:0] from desc[0] bits 31:16,
 * base[23:16] from desc[1] bits 7:0, base[31:24] from desc[1] bits 31:24.
 * NOTE(review): the braces, the declaration of 'base' and the return
 * statement (original lines 132-133, 137-138) are missing from this
 * extract.
 */
131 static inline unsigned long get_desc_base(unsigned long *desc)
134 base = ((desc[0] >> 16) & 0x0000ffff) |
135 ((desc[1] << 16) & 0x00ff0000) |
136 (desc[1] & 0xff000000);
/*
 * Encode a user code-segment descriptor whose limit covers 'limit' bytes,
 * expressed in pages (hence the divide by PAGE_SIZE after rounding down
 * by one).  limit[15:0] goes in the low dword; limit[19:16] plus the
 * fixed flag/access bits 0x00c0fb00 (page granularity, 32-bit, DPL 3
 * code -- presumably; verify against the descriptor format) go in the
 * high dword.
 * NOTE(review): the function braces (original lines 141/145) are missing
 * from this extract.
 */
140 static inline void set_user_cs(struct desc_struct *desc, unsigned long limit)
142 limit = (limit - 1) / PAGE_SIZE;
143 desc->a = limit & 0xffff;
144 desc->b = (limit & 0xf0000) | 0x00c0fb00;
/*
 * Push the mm's cached user code-segment descriptor into this CPU's GDT
 * via the Xen hypercall (the GDT is not directly writable under Xen).
 */
147 #define load_user_cs_desc(cpu, mm) \
148 HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS]), (mm)->context.user_cs.a, (mm)->context.user_cs.b);
150 #endif /* !__ASSEMBLY__ */