* Architecture-specific setup.
* Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 2000, 2004 Intel Corp
* Rohit Seth <rohit.seth@intel.com>
* Suresh Siddha <suresh.b.siddha@intel.com>
* Gordon Jin <gordon.jin@intel.com>
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* 12/26/04 S.Siddha, G.Jin, R.Seth
* Add multi-threading and multi-core detection
* 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
* 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
* 03/31/00 R.Seth cpu_initialized and current->processor fixes
* 02/04/00 D.Mosberger some more get_cpuinfo fixes...
* 02/01/00 R.Seth fixed get_cpuinfo for SMP
* 01/07/99 S.Eranian added the support for command line argument
* 06/24/99 W.Drummond added boot_cpu_data.
* 05/28/05 Z. Menyhart Dynamic stride size for "flush_icache_range()"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
#include <linux/dma-mapping.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
HYPERVISOR_shutdown(SHUTDOWN_crash);
/* we're never actually going to get here... */
static struct notifier_block xen_panic_block = {
.notifier_call = xen_panic_event,
.priority = 0 /* try to go last */
extern void ia64_setup_printk_clock(void);
DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;
static struct resource data_resource = {
.name = "Kernel data",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM
static struct resource code_resource = {
.name = "Kernel code",
.flags = IORESOURCE_BUSY | IORESOURCE_MEM
extern void efi_initialize_iomem_resources(struct resource *,
extern char _text[], _end[], _etext[];
unsigned long ia64_max_cacheline_size;
int dma_get_cache_alignment(void)
return ia64_max_cacheline_size;
EXPORT_SYMBOL(dma_get_cache_alignment);
unsigned long ia64_iobase; /* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;
133 * "flush_icache_range()" needs to know what processor dependent stride size to use
134 * when it makes i-cache(s) coherent with d-caches.
136 #define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */
137 unsigned long ia64_i_cache_stride_shift = ~0;
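/*
 * Illustrative sketch only (not the real flush_icache_range(), and
 * example_icache_flush is a hypothetical name): the flusher conceptually
 * walks the range in strides of 1UL << ia64_i_cache_stride_shift, so a
 * stride smaller than the true i-cache line size only costs extra flush
 * operations, never missed lines.
 */
#if 0	/* example sketch, not built */
static void example_icache_flush(unsigned long start, unsigned long end)
{
	unsigned long stride = 1UL << ia64_i_cache_stride_shift;
	unsigned long addr;

	for (addr = start & ~(stride - 1); addr < end; addr += stride)
		ia64_fc((void *) addr);	/* flush the cache line holding addr */
	ia64_sync_i();
	ia64_srlz_i();
}
#endif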
* The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This
* mask specifies a mask of address bits that must be 0 in order for two buffers to be
* mergeable by the I/O MMU (i.e., the end address of the first buffer and the start
* address of the second buffer must be aligned to (merge_mask+1) in order to be
* mergeable). By default, we assume there is no I/O MMU which can merge physically
* discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to an iommu
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
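/*
 * Worked example (illustrative values, not from this code): an I/O MMU with
 * a 4KB page size would set the mask to 0xfff, so two buffers may be merged
 * only when ((end_of_first | start_of_second) & 0xfff) == 0, i.e. both
 * addresses are 4KB aligned.  The default of ~0UL makes that test fail for
 * any non-zero address, which disables merging altogether.
 */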
* We use a special marker for the end of memory and it uses the extra (+1) slot
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;
* Filter incoming memory segments based on the primitive map created from the boot
* parameters. Segments contained in the map are removed from the memory ranges. A
* caller-specified function is called with the memory ranges that remain after filtering.
* This routine does not assume the incoming segments are sorted.
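/*
 * Illustrative example (made-up addresses): with a single reserved region
 * covering [0x1000, 0x2000) and an incoming segment [0x0000, 0x3000), the
 * caller-supplied function is invoked twice, for [0x0000, 0x1000) and for
 * [0x2000, 0x3000), so the reserved hole is never handed out as usable
 * memory.  (This ignores the PAGE_OFFSET/virtual-address handling below.)
 */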
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
unsigned long range_start, range_end, prev_start;
void (*func)(unsigned long, unsigned long, int);
if (start == PAGE_OFFSET) {
printk(KERN_WARNING "warning: skipping physical page 0\n");
if (start >= end) return 0;
* lowest possible address (walker uses virtual)
prev_start = PAGE_OFFSET;
for (i = 0; i < num_rsvd_regions; ++i) {
range_start = max(start, prev_start);
range_end = min(end, rsvd_region[i].start);
if (range_start < range_end)
call_pernode_memory(__pa(range_start), range_end - range_start, func);
/* nothing more available in this segment */
if (range_end == end) return 0;
prev_start = rsvd_region[i].end;
/* end of memory marker allows full processing inside loop body */
rsvd_region_cmp(struct rsvd_region *lhs, struct rsvd_region *rhs)
if (lhs->start > rhs->start)
if (lhs->start < rhs->start)
if (lhs->end > rhs->end)
if (lhs->end < rhs->end)
sort_regions (struct rsvd_region *rsvd_region, int max)
/* simple bubble sorting */
for (j = 0; j < max; ++j) {
if (rsvd_region_cmp(&rsvd_region[j],
&rsvd_region[j + 1]) > 0) {
struct rsvd_region tmp;
tmp = rsvd_region[j];
rsvd_region[j] = rsvd_region[j + 1];
rsvd_region[j + 1] = tmp;
for (j = 0; j < num - 1; j++) {
unsigned long start = rsvd_region[j].start;
unsigned long end = rsvd_region[j].end;
for (k = j + 1; k < num; k++) {
BUG_ON(start > rsvd_region[k].start);
if (end < rsvd_region[k].start) {
end = max(end, rsvd_region[k].end);
rsvd_region[j].end = end;
for (k = j + 1; k < num; k++) {
rsvd_region[k] = rsvd_region[k + collapsed];
num_rsvd_regions = num;
for (j = 0; j < num; j++) {
printk("rsvd_region[%d]: [0x%016lx, 0x%016lx)\n",
j, rsvd_region[j].start, rsvd_region[j].end);
* Request address space for all standard resources
static int __init register_memory(void)
code_resource.start = ia64_tpa(_text);
code_resource.end = ia64_tpa(_etext) - 1;
data_resource.start = ia64_tpa(_etext);
data_resource.end = ia64_tpa(_end) - 1;
efi_initialize_iomem_resources(&code_resource, &data_resource);
__initcall(register_memory);
* reserve_memory - setup reserved memory areas
* Setup the reserved memory areas set aside for the boot parameters,
* initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined,
* see include/asm-ia64/meminit.h if you need to define more.
reserve_memory (void)
* none of the entries in this table overlap
rsvd_region[n].start = (unsigned long) ia64_boot_param;
rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param);
rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
rsvd_region[n].end = (rsvd_region[n].start
+ strlen(__va(ia64_boot_param->command_line)) + 1);
rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
rsvd_region[n].end = (unsigned long) ia64_imva(_end);
if (is_running_on_xen()) {
rsvd_region[n].start = (unsigned long)__va((HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
rsvd_region[n].end = rsvd_region[n].start + PAGE_SIZE;
#ifdef CONFIG_BLK_DEV_INITRD
if (ia64_boot_param->initrd_start) {
rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size;
efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
/* crashkernel=size@offset specifies the size to reserve for a crash
* kernel. If offset is 0, then it is determined automatically.
* By reserving this memory we guarantee that Linux never sets it
* up as a DMA target. Useful for holding code to do something
* appropriate after a kernel panic.
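/* Illustrative example (made-up values): "crashkernel=256M@64M" makes
 * memparse(from + 12, &from) return 256 << 20 and leave *from pointing
 * at '@'; memparse(from + 1, &from) then returns 64 << 20 as the
 * requested base. */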
char *from = strstr(saved_command_line, "crashkernel=");
unsigned long base, size;
size = memparse(from + 12, &from);
base = memparse(from + 1, &from);
sort_regions(rsvd_region, n);
base = kdump_find_rsvd_region(size,
rsvd_region[n].start =
(unsigned long)__va(base);
(unsigned long)__va(base + size);
crashk_res.start = base;
crashk_res.end = base + size - 1;
efi_memmap_res.start = ia64_boot_param->efi_memmap;
efi_memmap_res.end = efi_memmap_res.start +
ia64_boot_param->efi_memmap_size;
boot_param_res.start = __pa(ia64_boot_param);
boot_param_res.end = boot_param_res.start +
sizeof(*ia64_boot_param);
/* end of memory marker */
rsvd_region[n].start = ~0UL;
rsvd_region[n].end = ~0UL;
num_rsvd_regions = n;
BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);
sort_regions(rsvd_region, num_rsvd_regions);
* find_initrd - get initrd parameters from the boot parameter structure
* Grab the initrd start and end from the boot parameter struct given us by
#ifdef CONFIG_BLK_DEV_INITRD
if (ia64_boot_param->initrd_start) {
initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
initrd_end = initrd_start + ia64_boot_param->initrd_size;
printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
initrd_start, ia64_boot_param->initrd_size);
unsigned long phys_iobase;
* Set `iobase' based on the EFI memory map or, failing that, the
* value firmware left in ar.k0.
* Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
* the port's virtual address, so ia32_load_state() loads it with a
* user virtual address. But in ia64 mode, glibc uses the
* *physical* address in ar.k0 to mmap the appropriate area from
* /dev/mem, and the inX()/outX() interfaces use MMIO. In both
* cases, user-mode can only use the legacy 0-64K I/O port space.
* ar.k0 is not involved in kernel I/O port accesses, which can use
* any of the I/O port spaces and are done via MMIO using the
* virtual mmio_base from the appropriate io_space[].
phys_iobase = efi_get_iobase();
phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
printk(KERN_INFO "No I/O port range found in EFI memory map, "
"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
/* setup legacy IO port space */
io_space[0].mmio_base = ia64_iobase;
io_space[0].sparse = 1;
* early_console_setup - setup debugging console
* Consoles started here require little enough setup that we can start using
* them very early in the boot process, either right after the machine
* vector initialization, or even before if the drivers can detect their hw.
* Returns non-zero if a console couldn't be set up.
static inline int __init
early_console_setup (char *cmdline)
#ifndef CONFIG_IA64_HP_SIM
if (is_running_on_xen()) {
extern struct console hpsim_cons;
hpsim_cons.flags |= CON_BOOT;
register_console(&hpsim_cons);
#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
extern int sn_serial_console_early_setup(void);
if (!sn_serial_console_early_setup())
#ifdef CONFIG_EFI_PCDP
if (!efi_setup_pcdp_console(cmdline))
#ifdef CONFIG_SERIAL_8250_CONSOLE
if (!early_serial_console_init(cmdline))
return (earlycons) ? 0 : -1;
mark_bsp_online (void)
/* If we register an early console, allow CPU 0 to printk */
cpu_set(smp_processor_id(), cpu_online_map);
check_for_logical_procs (void)
pal_logical_to_physical_t info;
status = ia64_pal_logical_to_phys(0, &info);
printk(KERN_INFO "No logical to physical processor mapping "
printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
* Total number of siblings that the BSP has, though not all of them
* may have booted successfully. The correct number of siblings
* booted is in info.overview_num_log.
smp_num_siblings = info.overview_tpc;
smp_num_cpucores = info.overview_cpp;
static __initdata int nomca;
static __init int setup_nomca(char *s)
early_param("nomca", setup_nomca);
#ifdef CONFIG_PROC_VMCORE
/* elfcorehdr= specifies the location of elf core header
* stored by the crashed kernel.
static int __init parse_elfcorehdr(char *arg)
elfcorehdr_addr = memparse(arg, &arg);
early_param("elfcorehdr", parse_elfcorehdr);
#endif /* CONFIG_PROC_VMCORE */
setup_arch (char **cmdline_p)
if (is_running_on_xen()) {
setup_xen_features();
/* Register a call for panic conditions. */
atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
*cmdline_p = __va(ia64_boot_param->command_line);
strlcpy(saved_command_line, *cmdline_p, COMMAND_LINE_SIZE);
#ifdef CONFIG_IA64_GENERIC
if (early_console_setup(*cmdline_p) == 0)
/* Initialize the ACPI boot-time table parser */
# ifdef CONFIG_ACPI_NUMA
smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */
#endif /* CONFIG_ACPI */
/* process SAL system table: */
ia64_sal_init(__va(efi.sal_systab));
ia64_setup_printk_clock();
cpu_physical_id(0) = hard_smp_processor_id();
cpu_set(0, cpu_sibling_map[0]);
cpu_set(0, cpu_core_map[0]);
check_for_logical_procs();
if (smp_num_cpucores > 1)
"cpu package is Multi-Core capable: number of cores=%d\n",
if (smp_num_siblings > 1)
"cpu package is Multi-Threading capable: number of siblings=%d\n",
cpu_init(); /* initialize the bootstrap CPU */
mmu_context_init(); /* initialize context_id bitmap */
check_sal_cache_flush();
# if defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con;
# if defined(CONFIG_VGA_CONSOLE)
* Non-legacy systems may route legacy VGA MMIO range to system
* memory. vga_con probes the MMIO hole, so memory looks like
* a VGA device to it. The EFI memory map can tell us if it's
* memory so we can avoid this problem.
if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
conswitchp = &vga_con;
if (is_running_on_xen()) {
shared_info_t *s = HYPERVISOR_shared_info;
xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
printk("Running on Xen! start_info_pfn=0x%lx nr_pages=%ld "
"flags=0x%x\n", s->arch.start_info_pfn,
xen_start_info->nr_pages, xen_start_info->flags);
if (!is_initial_xendomain()) {
#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
/* enable IA-64 Machine Check Abort Handling unless disabled */
platform_setup(cmdline_p);
contiguous_bitmap_init(max_pfn);
* Display cpu info for all CPUs.
show_cpuinfo (struct seq_file *m, void *v)
# define lpj c->loops_per_jiffy
# define cpunum c->cpu
# define lpj loops_per_jiffy
const char *feature_name;
{ 1UL << 0, "branchlong" },
{ 1UL << 1, "spontaneous deferral"},
{ 1UL << 2, "16-byte atomic ops" }
char features[128], *cp, sep;
struct cpuinfo_ia64 *c = v;
unsigned long proc_freq;
/* build the feature string: */
memcpy(features, " standard", 10);
for (i = 0; i < (int) ARRAY_SIZE(feature_bits); ++i) {
if (mask & feature_bits[i].mask) {
strcpy(cp, feature_bits[i].feature_name);
cp += strlen(feature_bits[i].feature_name);
mask &= ~feature_bits[i].mask;
/* print unknown features as a hex value: */
sprintf(cp, " 0x%lx", mask);
proc_freq = cpufreq_quick_get(cpunum);
proc_freq = c->proc_freq / 1000;
"features :%s\n" /* don't change this---it _is_ right! */
"cpu MHz : %lu.%06lu\n"
"itc MHz : %lu.%06lu\n"
"BogoMIPS : %lu.%02lu\n",
cpunum, c->vendor, c->family, c->model,
c->model_name, c->revision, c->archrev,
features, c->ppn, c->number,
proc_freq / 1000, proc_freq % 1000,
c->itc_freq / 1000000, c->itc_freq % 1000000,
lpj*HZ/500000, (lpj*HZ/5000) % 100);
seq_printf(m, "siblings : %u\n", cpus_weight(cpu_core_map[cpunum]));
if (c->threads_per_core > 1 || c->cores_per_socket > 1)
c->socket_id, c->core_id, c->thread_id);
c_start (struct seq_file *m, loff_t *pos)
while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
c_next (struct seq_file *m, void *v, loff_t *pos)
return c_start(m, pos);
c_stop (struct seq_file *m, void *v)
struct seq_operations cpuinfo_op = {
static char brandname[128];
static char * __cpuinit
get_model_name(__u8 family, __u8 model)
memcpy(brand, "Unknown", 8);
if (ia64_pal_get_brand_info(brand)) {
memcpy(brand, "Merced", 7);
else if (family == 0x1f) switch (model) {
case 0: memcpy(brand, "McKinley", 9); break;
case 1: memcpy(brand, "Madison", 8); break;
case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
if (brandname[0] == '\0')
return strcpy(brandname, brand);
else if (strcmp(brandname, brand) == 0)
return kstrdup(brand, GFP_KERNEL);
static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
unsigned long bits[5];
u64 ppn; /* processor serial number */
unsigned revision : 8;
unsigned archrev : 8;
unsigned reserved : 24;
pal_vm_info_1_u_t vm1;
pal_vm_info_2_u_t vm2;
unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */
for (i = 0; i < 5; ++i)
cpuid.bits[i] = ia64_get_cpuid(i);
memcpy(c->vendor, cpuid.field.vendor, 16);
c->cpu = smp_processor_id();
/* below default values will be overwritten by identify_siblings()
* for Multi-Threading/Multi-Core capable CPUs
c->threads_per_core = c->cores_per_socket = c->num_log = 1;
identify_siblings(c);
c->ppn = cpuid.field.ppn;
c->number = cpuid.field.number;
c->revision = cpuid.field.revision;
c->model = cpuid.field.model;
c->family = cpuid.field.family;
c->archrev = cpuid.field.archrev;
c->features = cpuid.field.features;
c->model_name = get_model_name(c->family, c->model);
status = ia64_pal_vm_summary(&vm1, &vm2);
if (status == PAL_STATUS_SUCCESS) {
impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
setup_per_cpu_areas (void)
/* start_kernel() requires this... */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
prefill_possible_map();
* Calculate the max. cache line size.
* In addition, the minimum of the i-cache stride sizes is calculated for
* "flush_icache_range()".
static void __cpuinit
get_max_cacheline_size (void)
unsigned long line_size, max = 1;
unsigned int cache_size = 0;
u64 l, levels, unique_caches;
pal_cache_config_info_t cci;
status = ia64_pal_cache_summary(&levels, &unique_caches);
printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
__FUNCTION__, status);
max = SMP_CACHE_BYTES;
/* Safest setup for "flush_icache_range()" */
ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
for (l = 0; l < levels; ++l) {
status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
"%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
__FUNCTION__, l, status);
max = SMP_CACHE_BYTES;
/* The safest setup for "flush_icache_range()" */
cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
cci.pcci_unified = 1;
line_size = 1 << cci.pcci_line_size;
if (cache_size < cci.pcci_cache_size)
cache_size = cci.pcci_cache_size;
if (!cci.pcci_unified) {
status = ia64_pal_cache_config_info(l,
/* cache_type (instruction)= */ 1,
"%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
__FUNCTION__, l, status);
/* The safest setup for "flush_icache_range()" */
cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
if (cci.pcci_stride < ia64_i_cache_stride_shift)
ia64_i_cache_stride_shift = cci.pcci_stride;
max_cache_size = max(max_cache_size, cache_size);
if (max > ia64_max_cacheline_size)
ia64_max_cacheline_size = max;
* cpu_init() initializes state that is per-CPU. This function acts
* as a 'CPU state barrier'; nothing should get across.
extern void __cpuinit ia64_mmu_init (void *);
unsigned long num_phys_stacked;
pal_vm_info_2_u_t vmi;
unsigned int max_ctx;
struct cpuinfo_ia64 *cpu_info;
cpu_data = per_cpu_init();
* We set ar.k3 so that assembly code in the MCA handler can compute
* physical addresses of per-CPU variables with a simple:
* phys = ar.k3 + &per_cpu_var
ia64_set_kr(IA64_KR_PER_CPU_DATA,
ia64_tpa(cpu_data) - (long) __per_cpu_start);
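/*
 * Illustrative sketch (assumed C equivalent of what the MCA assembly does,
 * not code from this file): with ar.k3 loaded as above, the physical
 * address of a per-CPU variable is roughly
 *
 *	phys = ia64_get_kr(IA64_KR_PER_CPU_DATA)
 *	       + ((char *) &per_cpu_var - __per_cpu_start);
 *
 * i.e. the variable's offset within the per-CPU section is simply added
 * to the physical base.
 */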
get_max_cacheline_size();
* We can't pass "local_cpu_data" to identify_cpu() because we haven't called
* ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it
* depends on the data returned by identify_cpu(). We break the dependency by
* accessing cpu_data() through the canonical per-CPU address.
cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
identify_cpu(cpu_info);
#ifdef CONFIG_MCKINLEY
# define FEATURE_SET 16
struct ia64_pal_retval iprv;
if (cpu_info->family == 0x1f) {
PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
(iprv.v1 | 0x80), FEATURE_SET, 0);
/* Clear the stack memory reserved for pt_regs: */
memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
ia64_set_kr(IA64_KR_FPU_OWNER, 0);
* Initialize the page-table base register to a global
* directory with all zeroes. This ensures that we can handle
* TLB misses to user address-space even before we create the
* first user address-space. This may happen, e.g., due to
* aggressive use of lfetch.fault.
ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));
* Initialize default control register to defer speculative faults except
* for those arising from TLB misses, which are not deferred. The
* kernel MUST NOT depend on a particular setting of these bits (in other words,
* the kernel must have recovery code for all speculative accesses). Turn on
* dcr.lc as per recommendation by the architecture team. Most IA-32 apps
* shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
ia64_mmu_init(ia64_imva(cpu_data));
ia64_mca_cpu_init(ia64_imva(cpu_data));
#ifdef CONFIG_IA32_SUPPORT
/* Clear ITC to eliminate sched_clock() overflows in human time. */
/* disable all local interrupt sources: */
ia64_set_itv(1 << 16);
ia64_set_lrr0(1 << 16);
ia64_set_lrr1(1 << 16);
ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);
/* clear TPR & XTP to enable all interrupt classes: */
ia64_setreg(_IA64_REG_CR_TPR, 0);
/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
if (ia64_pal_vm_summary(NULL, &vmi) == 0)
max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
max_ctx = (1U << 15) - 1; /* use architected minimum */
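/*
 * Worked example (explanatory note, assuming the usual ia64 RID layout in
 * which the low 3 bits of a region ID carry the region number): with
 * rid_size == 18, 18 - 3 = 15 bits remain for the context, so
 * max_ctx = (1U << 15) - 1 = 32767, matching the fallback above.
 */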
while (max_ctx < ia64_ctx.max_ctx) {
unsigned int old = ia64_ctx.max_ctx;
if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
num_phys_stacked = 96;
/* size of physical stacked register partition plus 8 bytes: */
__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
platform_cpu_init();
/* Needs to be moved into platform_cpu_init() later */
if (is_running_on_xen()) {
extern void xen_smp_intr_init(void);
xen_smp_intr_init();
pm_idle = default_idle;
* On SMP systems, when the scheduler does migration-cost autodetection,
* it needs a way to flush as much of the CPU's caches as possible.
void sched_cacheflush(void)
ia64_sal_cache_flush(3);
ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
(unsigned long) __end___mckinley_e9_bundles);
static int __init run_dmi_scan(void)
core_initcall(run_dmi_scan);