linux 2.6.16.38 w/ vs2.0.3-rc1
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4467c49..abd758f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -73,6 +73,7 @@
 
 int have_of = 1;
 int boot_cpuid = 0;
+int boot_cpuid_phys = 0;
 dev_t boot_dev;
 u64 ppc64_pft_size;
 
@@ -95,6 +96,11 @@ int dcache_bsize;
 int icache_bsize;
 int ucache_bsize;
 
+/* The main machine-dep calls structure
+ */
+struct machdep_calls ppc_md;
+EXPORT_SYMBOL(ppc_md);
+
 #ifdef CONFIG_MAGIC_SYSRQ
 unsigned long SYSRQ_KEY;
 #endif /* CONFIG_MAGIC_SYSRQ */
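
ppc_md, defined and exported above, is the single dispatch table through which generic powerpc code reaches platform hooks. A minimal sketch of the shape this patch relies on, listing only the hooks that appear in the hunks below (the authoritative definition is struct machdep_calls in include/asm-powerpc/machdep.h; the exact signatures here are assumptions):

/* Sketch only -- not the real declaration; see include/asm-powerpc/machdep.h. */
struct machdep_calls_sketch {
	int	(*probe)(int platform);		/* non-zero if this platform matches */
	void	(*setup_arch)(void);		/* late platform setup */
	void	(*idle_loop)(void);		/* idle loop; default_idle if left NULL */
	void	(*panic)(char *str);		/* optional panic hook */
	void	(*cpu_die)(void);		/* offline/halt the calling CPU */
	int	(*check_legacy_ioport)(unsigned long base_port);
};
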
@@ -155,6 +161,32 @@ early_param("smt-enabled", early_smt_enabled);
 #define check_smt_enabled()
 #endif /* CONFIG_SMP */
 
+extern struct machdep_calls pSeries_md;
+extern struct machdep_calls pmac_md;
+extern struct machdep_calls maple_md;
+extern struct machdep_calls cell_md;
+extern struct machdep_calls iseries_md;
+
+/* Ultimately, stuff them in an elf section like initcalls... */
+static struct machdep_calls __initdata *machines[] = {
+#ifdef CONFIG_PPC_PSERIES
+       &pSeries_md,
+#endif /* CONFIG_PPC_PSERIES */
+#ifdef CONFIG_PPC_PMAC
+       &pmac_md,
+#endif /* CONFIG_PPC_PMAC */
+#ifdef CONFIG_PPC_MAPLE
+       &maple_md,
+#endif /* CONFIG_PPC_MAPLE */
+#ifdef CONFIG_PPC_CELL
+       &cell_md,
+#endif
+#ifdef CONFIG_PPC_ISERIES
+       &iseries_md,
+#endif
+       NULL
+};
+
 /*
  * Early initialization entry point. This is called by head.S
  * with MMU translation disabled. We rely on the "feature" of
@@ -176,10 +208,13 @@ early_param("smt-enabled", early_smt_enabled);
 
 void __init early_setup(unsigned long dt_ptr)
 {
+       struct paca_struct *lpaca = get_paca();
+       static struct machdep_calls **mach;
+
        /* Enable early debugging if any specified (see udbg.h) */
        udbg_early_init();
 
-       DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);
+       DBG(" -> early_setup()\n");
 
        /*
         * Do early initializations using the flattened device
@@ -188,16 +223,22 @@ void __init early_setup(unsigned long dt_ptr)
         */
        early_init_devtree(__va(dt_ptr));
 
-       /* Now we know the logical id of our boot cpu, setup the paca. */
-       setup_boot_paca();
-
-       /* Fix up paca fields required for the boot cpu */
-       get_paca()->cpu_start = 1;
-       get_paca()->stab_real = __pa((u64)&initial_stab);
-       get_paca()->stab_addr = (u64)&initial_stab;
+       /*
+        * Iterate all ppc_md structures until we find the proper
+        * one for the current machine type
+        */
+       DBG("Probing machine type for platform %x...\n", _machine);
 
-       /* Probe the machine type */
-       probe_machine();
+       for (mach = machines; *mach; mach++) {
+               if ((*mach)->probe(_machine))
+                       break;
+       }
+       /* What can we do if we didn't find ? */
+       if (*mach == NULL) {
+               DBG("No suitable machine found !\n");
+               for (;;);
+       }
+       ppc_md = **mach;
 
 #ifdef CONFIG_CRASH_DUMP
        kdump_setup();
@@ -218,7 +259,7 @@ void __init early_setup(unsigned long dt_ptr)
        if (cpu_has_feature(CPU_FTR_SLB))
                slb_initialize();
        else if (!firmware_has_feature(FW_FEATURE_ISERIES))
-               stab_initialize(get_paca()->stab_real);
+               stab_initialize(lpaca->stab_real);
 
        DBG(" <- early_setup()\n");
 }
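
Each entry in the machines[] table above is a fully initialized machdep_calls provided by its platform directory; the probe loop in early_setup() copies the first one whose probe hook claims the platform code. A hedged sketch of what the PowerMac side might look like (the real definition lives in arch/powerpc/platforms/powermac/setup.c; the names below are illustrative):

/* Illustrative only: shape of a platform's machdep_calls as consumed by the
 * machines[] walk in early_setup() above. */
static int __init pmac_probe_sketch(int platform)
{
	return platform == PLATFORM_POWERMAC;
}

static void __init pmac_setup_arch_sketch(void)
{
	/* platform-specific setup would go here */
}

struct machdep_calls pmac_md_sketch = {
	.probe		= pmac_probe_sketch,
	.setup_arch	= pmac_setup_arch_sketch,
	.idle_loop	= NULL,		/* falls back to default_idle, see setup_arch() below */
};
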
@@ -297,7 +338,7 @@ static void __init initialize_cache_info(void)
                        const char *dc, *ic;
 
                        /* Then read cache informations */
-                       if (machine_is(powermac)) {
+                       if (_machine == PLATFORM_POWERMAC) {
                                dc = "d-cache-block-size";
                                ic = "i-cache-block-size";
                        } else {
@@ -441,6 +482,7 @@ void __init setup_system(void)
        printk("ppc64_pft_size                = 0x%lx\n", ppc64_pft_size);
        printk("ppc64_interrupt_controller    = 0x%ld\n",
               ppc64_interrupt_controller);
+       printk("platform                      = 0x%x\n", _machine);
        printk("physicalMemorySize            = 0x%lx\n", lmb_phys_mem_size());
        printk("ppc64_caches.dcache_line_size = 0x%x\n",
               ppc64_caches.dline_size);
@@ -453,6 +495,8 @@ void __init setup_system(void)
 #endif
        printk("-----------------------------------------------------\n");
 
+       mm_init_ppc64();
+
        DBG(" <- setup_system()\n");
 }
 
@@ -472,7 +516,7 @@ static void __init irqstack_early_init(void)
         * interrupt stacks must be under 256MB, we cannot afford to take
         * SLB misses on them.
         */
-       for_each_possible_cpu(i) {
+       for_each_cpu(i) {
                softirq_ctx[i] = (struct thread_info *)
                        __va(lmb_alloc_base(THREAD_SIZE,
                                            THREAD_SIZE, 0x10000000));
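
The for_each_possible_cpu() -> for_each_cpu() changes in this and the following hunks are part of the backport: 2.6.16 only knows the older spelling. Both iterate the same set of possible CPUs; a simplified sketch of the 2.6.16 definition (the real one is for_each_cpu_mask() over cpu_possible_map in include/linux/cpumask.h):

/* Simplified; ignores the NR_CPUS == 1 special case. */
#define for_each_cpu_sketch(cpu)				\
	for ((cpu) = first_cpu(cpu_possible_map);		\
	     (cpu) < NR_CPUS;					\
	     (cpu) = next_cpu((cpu), cpu_possible_map))
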
@@ -505,7 +549,7 @@ static void __init emergency_stack_init(void)
         */
        limit = min(0x10000000UL, lmb.rmo_size);
 
-       for_each_possible_cpu(i)
+       for_each_cpu(i)
                paca[i].emergency_sp =
                __va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
 }
@@ -535,8 +579,7 @@ void __init setup_arch(char **cmdline_p)
        panic_timeout = 180;
 
        if (ppc_md.panic)
-               atomic_notifier_chain_register(&panic_notifier_list,
-                               &ppc64_panic_block);
+               notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);
 
        init_mm.start_code = PAGE_OFFSET;
        init_mm.end_code = (unsigned long) _etext;
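
The notifier change above is likewise a backport detail: atomic_notifier_chain_register() does not exist in 2.6.16, which still uses the single notifier_chain_register() API. For reference, a hedged sketch of how ppc64_panic_block and its callback plug into that API (the real callback is defined elsewhere in setup_64.c and is not part of this hunk):

/* Illustrative shape of the panic notifier wired up above; it is only
 * registered when the platform supplied ppc_md.panic (see the check above). */
static int example_panic_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	ppc_md.panic((char *)ptr);	/* hand the panic string to the platform */
	return NOTIFY_DONE;
}

static struct notifier_block example_panic_block = {
	.notifier_call = example_panic_event,
};
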
@@ -558,6 +601,12 @@ void __init setup_arch(char **cmdline_p)
 
        ppc_md.setup_arch();
 
+       /* Use the default idle loop if the platform hasn't provided one. */
+       if (NULL == ppc_md.idle_loop) {
+               ppc_md.idle_loop = default_idle;
+               printk(KERN_INFO "Using default idle loop\n");
+       }
+
        paging_init();
        ppc64_boot_msg(0x15, "Setup Done");
 }
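
The default_idle fallback matters because, once setup is done, every CPU enters the platform idle loop only through this pointer. A sketch of how the hook is eventually consumed (the real call site lives in the powerpc idle code, not in setup_64.c):

/* Sketch of the consumer of the hook set above. */
void cpu_idle_sketch(void)
{
	/* ppc_md.idle_loop() itself loops forever; if the platform left it
	 * NULL, setup_arch() has already pointed it at default_idle. */
	ppc_md.idle_loop();
}
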
@@ -594,6 +643,14 @@ void ppc64_terminate_msg(unsigned int src, const char *msg)
        printk("[terminate]%04x %s\n", src, msg);
 }
 
+int check_legacy_ioport(unsigned long base_port)
+{
+       if (ppc_md.check_legacy_ioport == NULL)
+               return 0;
+       return ppc_md.check_legacy_ioport(base_port);
+}
+EXPORT_SYMBOL(check_legacy_ioport);
+
 void cpu_die(void)
 {
        if (ppc_md.cpu_die)
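
check_legacy_ioport(), added above, lets legacy ISA-style drivers ask the platform whether a port range is actually usable before touching it; the default (no hook registered) means "no objection". An illustrative caller with made-up driver names (only check_legacy_ioport() itself comes from this patch):

/* Hypothetical legacy driver probe using the helper exported above. */
static int __init example_legacy_probe(void)
{
	if (check_legacy_ioport(0x3f8))		/* non-zero: port not usable here */
		return -ENODEV;
	/* safe to request_region()/inb()/outb() on the legacy port */
	return 0;
}
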
@@ -614,7 +671,7 @@ void __init setup_per_cpu_areas(void)
                size = PERCPU_ENOUGH_ROOM;
 #endif
 
-       for_each_possible_cpu(i) {
+       for_each_cpu(i) {
                ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
                if (!ptr)
                        panic("Cannot allocate cpu data for CPU %d\n", i);