+++ /dev/null
-#
-# Makefile for the linux kernel.
-#
-
-extra-y := head.o head64.o init_task.o vmlinux.lds.s
-EXTRA_AFLAGS := -traditional
-obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
-		ptrace.o i8259.o ioport.o ldt.o setup.o time.o sys_x86_64.o \
-		x8664_ksyms.o i387.o syscall.o vsyscall.o \
-		setup64.o bootflag.o e820.o reboot.o warmreboot.o
-obj-y += mce.o
-
-obj-$(CONFIG_MTRR) += ../../i386/kernel/cpu/mtrr/
-obj-$(CONFIG_ACPI_BOOT) += acpi/
-obj-$(CONFIG_X86_MSR) += msr.o
-obj-$(CONFIG_MICROCODE) += microcode.o
-obj-$(CONFIG_X86_CPUID) += cpuid.o
-obj-$(CONFIG_SMP) += smp.o smpboot.o trampoline.o
-obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o
-obj-$(CONFIG_X86_IO_APIC) += io_apic.o mpparse.o
-obj-$(CONFIG_PM) += suspend.o
-obj-$(CONFIG_SOFTWARE_SUSPEND) += suspend_asm.o
-obj-$(CONFIG_CPU_FREQ) += cpufreq/
-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_GART_IOMMU) += pci-gart.o aperture.o
-obj-$(CONFIG_DUMMY_IOMMU) += pci-nommu.o pci-dma.o
-obj-$(CONFIG_SWIOTLB) += swiotlb.o
-obj-$(CONFIG_SCHED_SMT) += domain.o
-
-obj-$(CONFIG_MODULES) += module.o
-
-obj-y += topology.o
-
-bootflag-y += ../../i386/kernel/bootflag.o
-cpuid-$(subst m,y,$(CONFIG_X86_CPUID)) += ../../i386/kernel/cpuid.o
-topology-y += ../../i386/mach-default/topology.o
-swiotlb-$(CONFIG_SWIOTLB) += ../../ia64/lib/swiotlb.o
-microcode-$(subst m,y,$(CONFIG_MICROCODE)) += ../../i386/kernel/microcode.o
+++ /dev/null
-#include <linux/init.h>
-#include <linux/sched.h>
-
-/* Don't do any NUMA setup on Opteron right now. They seem to be
- better off with flat scheduling. This is just for SMT. */
-
-#ifdef CONFIG_SCHED_SMT
-
-static struct sched_group sched_group_cpus[NR_CPUS];
-static struct sched_group sched_group_phys[NR_CPUS];
-static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct sched_domain, phys_domains);
-__init void arch_init_sched_domains(void)
-{
-	int i;
-	struct sched_group *first = NULL, *last = NULL;
-
-	/* Set up domains */
-	for_each_cpu(i) {
-		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
-		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
-
-		*cpu_domain = SD_SIBLING_INIT;
-		/* Disable SMT NICE for CMP */
-		/* RED-PEN use a generic flag */
-		if (cpu_data[i].x86_vendor == X86_VENDOR_AMD)
-			cpu_domain->flags &= ~SD_SHARE_CPUPOWER;
-		cpu_domain->span = cpu_sibling_map[i];
-		cpu_domain->parent = phys_domain;
-		cpu_domain->groups = &sched_group_cpus[i];
-
-		*phys_domain = SD_CPU_INIT;
-		phys_domain->span = cpu_possible_map;
-		phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];
-	}
-
-	/* Set up CPU (sibling) groups */
-	for_each_cpu(i) {
-		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
-		int j;
-		first = last = NULL;
-
-		if (i != first_cpu(cpu_domain->span))
-			continue;
-
-		for_each_cpu_mask(j, cpu_domain->span) {
-			struct sched_group *cpu = &sched_group_cpus[j];
-
-			cpus_clear(cpu->cpumask);
-			cpu_set(j, cpu->cpumask);
-			cpu->cpu_power = SCHED_LOAD_SCALE;
-
-			if (!first)
-				first = cpu;
-			if (last)
-				last->next = cpu;
-			last = cpu;
-		}
-		last->next = first;
-	}
-
-	first = last = NULL;
-	/* Set up physical groups */
-	for_each_cpu(i) {
-		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
-		struct sched_group *cpu = &sched_group_phys[i];
-
-		if (i != first_cpu(cpu_domain->span))
-			continue;
-
-		cpu->cpumask = cpu_domain->span;
-		/*
-		 * Make each extra sibling increase power by 10% of
-		 * the basic CPU. This is very arbitrary.
-		 */
-		cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;
-
-		if (!first)
-			first = cpu;
-		if (last)
-			last->next = cpu;
-		last = cpu;
-	}
-	last->next = first;
-
-	mb();
-	for_each_cpu(i) {
-		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
-		cpu_attach_domain(cpu_domain, i);
-	}
-}
-
-#endif
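
A note on the group lists built above: both the sibling groups and the physical groups are chained through their ->next pointers into a circular singly linked list, closed by the final last->next = first; the scheduler walks that ring when it balances a domain. Below is a minimal standalone sketch of the same linking pattern, using an illustrative struct group in place of the kernel's struct sched_group:

#include <stdio.h>

/* Illustrative stand-in for the kernel's struct sched_group. */
struct group {
	int id;
	struct group *next;
};

int main(void)
{
	struct group g[4];
	struct group *first = NULL, *last = NULL;
	struct group *p;
	int i;

	/* Same pattern as arch_init_sched_domains(): append each group,
	   then close the ring with last->next = first. */
	for (i = 0; i < 4; i++) {
		g[i].id = i;
		g[i].next = NULL;
		if (!first)
			first = &g[i];
		if (last)
			last->next = &g[i];
		last = &g[i];
	}
	last->next = first;

	/* Walk the ring exactly once, as a balancer iterating a domain would. */
	p = first;
	do {
		printf("group %d\n", p->id);
		p = p->next;
	} while (p != first);

	return 0;
}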
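
The physical-group cpu_power assignment rates a package at one full CPU plus 10% for every extra SMT sibling. A small worked sketch of that arithmetic follows, assuming SCHED_LOAD_SCALE is 128 (its value in kernels of this era); the helper name is illustrative, not from the patch:

#include <stdio.h>

#define SCHED_LOAD_SCALE 128	/* assumed value for this kernel era */

/* Illustrative helper mirroring the physical-group power computation. */
static unsigned long phys_group_power(unsigned long siblings)
{
	return SCHED_LOAD_SCALE + SCHED_LOAD_SCALE * (siblings - 1) / 10;
}

int main(void)
{
	/* One sibling:  128 + 0  = 128 (a plain CPU).
	 * Two siblings: 128 + 12 = 140, i.e. roughly 1.1x a single CPU. */
	printf("1 sibling : %lu\n", phys_group_power(1));
	printf("2 siblings: %lu\n", phys_group_power(2));
	return 0;
}

With integer division the 10% credit rounds down, so a 2-way SMT package comes out at 140 rather than 140.8.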