X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fx86_64%2Fkernel%2Finit_task.c;h=9c141e1a6b2fef483baca0cc43a264458fe762ca;hb=4e76c8a9fa413ccc09d3f7f664183dcce3555d57;hp=8bc4becb8459b887e8e9da311922b6f5e42a29de;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git

diff --git a/arch/x86_64/kernel/init_task.c b/arch/x86_64/kernel/init_task.c
index 8bc4becb8..9c141e1a6 100644
--- a/arch/x86_64/kernel/init_task.c
+++ b/arch/x86_64/kernel/init_task.c
@@ -37,6 +37,8 @@ union thread_union init_thread_union
 struct task_struct init_task = INIT_TASK(init_task);
 
 EXPORT_SYMBOL(init_task);
+
+#ifndef CONFIG_X86_NO_TSS
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
@@ -44,8 +46,7 @@ EXPORT_SYMBOL(init_task);
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_maxaligned_in_smp;
+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
+#endif
 
 #define ALIGN_TO_4K __attribute__((section(".data.init_task")))
-
-pgd_t boot_vmalloc_pgt[512] ALIGN_TO_4K;
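
The comment in the second hunk explains why init_tss is cacheline-aligned: each CPU writes only its own TSS, and if two CPUs' copies shared a cache line, every write would invalidate the other CPU's line ("ping-pong"). Below is a minimal userspace sketch of that idea, not the kernel's actual macro expansions (the real DEFINE_PER_CPU places the object in a dedicated per-CPU linker section and each CPU reaches its copy through a per-CPU base offset). CACHE_LINE_SIZE, NR_CPUS, DEFINE_PER_CPU_ALIGNED, and struct tss_stub are all illustrative stand-ins chosen for this example.

#include <stdio.h>

/* Assumption: 64-byte cache lines, typical on x86_64; the kernel
 * derives the real value from L1_CACHE_SHIFT at build time. */
#define CACHE_LINE_SIZE 64
#define NR_CPUS 4

/* Simplified stand-in for DEFINE_PER_CPU plus
 * ____cacheline_internodealigned_in_smp: one copy of the object per
 * CPU, each copy padded to a full cache line and the whole array
 * aligned to a line boundary, so no two CPUs' copies share a line. */
#define DEFINE_PER_CPU_ALIGNED(type, name)                      \
	static union {                                          \
		type var;                                       \
		char pad[CACHE_LINE_SIZE];                      \
	} name[NR_CPUS] __attribute__((aligned(CACHE_LINE_SIZE)))

struct tss_stub {		/* hypothetical trimmed-down TSS */
	unsigned long rsp0;
};

DEFINE_PER_CPU_ALIGNED(struct tss_stub, init_tss);

int main(void)
{
	/* Each CPU's copy starts on its own cache-line boundary,
	 * 64 bytes apart, so writes by one CPU never invalidate
	 * another CPU's cached copy. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: init_tss copy at %p\n", cpu,
		       (void *)&init_tss[cpu].var);
	return 0;
}

The patch itself makes two further changes in the same spirit: it renames the alignment annotation to ____cacheline_internodealigned_in_smp (which on NUMA-aware configs aligns to the internode cache-line size rather than the plain SMP one) and adds a static INIT_TSS initializer, while the new #ifndef CONFIG_X86_NO_TSS guard compiles the per-CPU TSS out entirely on builds (such as Xen guests) where the hypervisor owns the TSS.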