X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fx86_64%2Fkernel%2Finit_task.c;h=86bc43b04679a09477962ec39526d5b97c2d0e70;hb=refs%2Fheads%2Fvserver;hp=33185ca33f6a0ad8718446159a5a4c345dc01cbb;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git

diff --git a/arch/x86_64/kernel/init_task.c b/arch/x86_64/kernel/init_task.c
index 33185ca33..86bc43b04 100644
--- a/arch/x86_64/kernel/init_task.c
+++ b/arch/x86_64/kernel/init_task.c
@@ -37,6 +37,8 @@ union thread_union init_thread_union
 struct task_struct init_task = INIT_TASK(init_task);
 
 EXPORT_SYMBOL(init_task);
+
+#ifndef CONFIG_X86_NO_TSS
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
@@ -44,9 +46,12 @@ EXPORT_SYMBOL(init_task);
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-struct tss_struct init_tss[NR_CPUS] __cacheline_aligned;
+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+#endif
 
+/* Copies of the original ist values from the tss are only accessed during
+ * debugging, no special alignment required.
+ */
+DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
 #define ALIGN_TO_4K __attribute__((section(".data.init_task")))
-
-pgd_t boot_vmalloc_pgt[512] ALIGN_TO_4K;
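
For context (not part of the diff itself): the change converts init_tss from a flat NR_CPUS array into a DEFINE_PER_CPU variable, so each processor gets its own cacheline-aligned instance. Below is a minimal sketch of how such a per-CPU TSS would be reached from kernel code of this 2.6 era; the helper name show_tss_rsp0 is hypothetical, while per_cpu(), get_cpu() and put_cpu() are the era's real per-CPU accessors:

#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/processor.h>

/* Hypothetical helper: read the ring-0 stack pointer out of the
 * current CPU's TSS.  get_cpu() disables preemption so the CPU
 * number stays valid while the per-CPU data is touched.
 */
static unsigned long show_tss_rsp0(void)
{
	int cpu = get_cpu();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned long rsp0 = tss->rsp0;	/* x86_64 field name in 2.6 */

	put_cpu();
	return rsp0;
}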
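
The alignment rationale in the comment (exact cacheline boundaries to eliminate cacheline ping-pong) is ordinary false-sharing avoidance and can be demonstrated outside the kernel. The following is a standalone C11 sketch, not from the repository, with 64 bytes as an assumed cacheline size; dropping the alignas() makes neighbouring counters share cachelines, so concurrent threads bounce the lines between cores and slow each other down:

#include <pthread.h>
#include <stdio.h>
#include <stdalign.h>

#define NTHREADS  4
#define ITERS     100000000UL
#define CACHELINE 64	/* assumed cacheline size */

/* One counter per thread.  Aligning each slot to a cacheline keeps
 * the hot lines CPU-local, the same property the diff preserves for
 * init_tss with ____cacheline_internodealigned_in_smp.
 */
struct slot {
	alignas(CACHELINE) unsigned long count;
};

static struct slot slots[NTHREADS];

static void *worker(void *arg)
{
	struct slot *s = arg;
	unsigned long i;

	for (i = 0; i < ITERS; i++)
		s->count++;
	return NULL;
}

int main(void)			/* build: cc -O2 -pthread demo.c */
{
	pthread_t tid[NTHREADS];
	int i;

	for (i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, worker, &slots[i]);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	for (i = 0; i < NTHREADS; i++)
		printf("slot %d: %lu\n", i, slots[i].count);
	return 0;
}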