/*
* Architecture-specific setup.
*
- * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
- * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
+ * Copyright (C) 2000, 2004 Intel Corp
+ * Rohit Seth <rohit.seth@intel.com>
+ * Suresh Siddha <suresh.b.siddha@intel.com>
+ * Gordon Jin <gordon.jin@intel.com>
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
*
+ * 12/26/04 S.Siddha, G.Jin, R.Seth
+ * Add multi-threading and multi-core detection
* 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
* 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
* 03/31/00 R.Seth cpu_initialized and current->processor fixes
* 02/01/00 R.Seth fixed get_cpuinfo for SMP
* 01/07/99 S.Eranian added the support for command line argument
* 06/24/99 W.Drummond added boot_cpu_data.
+ * 05/28/05 Z. Menyhart Dynamic stride size for "flush_icache_range()"
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
+#include <linux/platform.h>
+#include <linux/pm.h>
+#include <linux/cpufreq.h>
#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>
#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif
+extern void ia64_setup_printk_clock(void);
+
DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
+unsigned long vga_console_iobase;
+unsigned long vga_console_membase;
+
+static struct resource data_resource = {
+ .name = "Kernel data",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+static struct resource code_resource = {
+ .name = "Kernel code",
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+extern void efi_initialize_iomem_resources(struct resource *,
+ struct resource *);
+extern char _text[], _end[], _etext[];
unsigned long ia64_max_cacheline_size;
+
+int dma_get_cache_alignment(void)
+{
+ return ia64_max_cacheline_size;
+}
+EXPORT_SYMBOL(dma_get_cache_alignment);
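+/*
+ * ia64_max_cacheline_size is filled in by get_max_cacheline_size() below.
+ * Rough sketch of typical driver use (nbytes/len/buf are illustrative only):
+ *
+ *	size_t len = ALIGN(nbytes, dma_get_cache_alignment());
+ *	void *buf = kmalloc(len, GFP_KERNEL);
+ */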
+
unsigned long ia64_iobase; /* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;
-unsigned char aux_device_present = 0xaa; /* XXX remove this when legacy I/O is gone */
+/*
+ * "flush_icache_range()" needs to know what processor dependent stride size to use
+ * when it makes i-cache(s) coherent with d-caches.
+ */
+#define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */
+unsigned long ia64_i_cache_stride_shift = ~0;
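+/*
+ * Rough sketch of how the stride is consumed (the real loop lives in
+ * assembly in arch/ia64/lib/flush.S):
+ *
+ *	stride = 1UL << ia64_i_cache_stride_shift;
+ *	for (addr = start & ~(stride - 1); addr < end; addr += stride)
+ *		fc(addr);		-- "flush cache" on that line
+ *	sync.i; srlz.i
+ *
+ * Starting the shift at ~0 lets get_max_cacheline_size() record the
+ * minimum stride reported across all cache levels.
+ */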
/*
* The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This
}
}
+/*
+ * Request address space for all standard resources
+ */
+static int __init register_memory(void)
+{
+ code_resource.start = ia64_tpa(_text);
+ code_resource.end = ia64_tpa(_etext) - 1;
+ data_resource.start = ia64_tpa(_etext);
+ data_resource.end = ia64_tpa(_end) - 1;
+ efi_initialize_iomem_resources(&code_resource, &data_resource);
+
+ return 0;
+}
+
+__initcall(register_memory);
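+/*
+ * Being an initcall, register_memory() runs after the EFI memory map has
+ * been parsed, so the "Kernel code" and "Kernel data" ranges end up nested
+ * under the matching System RAM entries in /proc/iomem.
+ */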
+
/**
* reserve_memory - setup reserved memory areas
*
}
#endif
+ efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
+ n++;
+
/* end of memory marker */
rsvd_region[n].start = ~0UL;
rsvd_region[n].end = ~0UL;
static void __init
io_port_init (void)
{
- extern unsigned long ia64_iobase;
unsigned long phys_iobase;
/*
- * Set `iobase' to the appropriate address in region 6 (uncached access range).
+ * Set `iobase' based on the EFI memory map or, failing that, the
+ * value firmware left in ar.k0.
+ *
+ * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
+ * the port's virtual address, so ia32_load_state() loads it with a
+ * user virtual address. But in ia64 mode, glibc uses the
+ * *physical* address in ar.k0 to mmap the appropriate area from
+ * /dev/mem, and the inX()/outX() interfaces use MMIO. In both
+ * cases, user-mode can only use the legacy 0-64K I/O port space.
*
- * The EFI memory map is the "preferred" location to get the I/O port space base,
- * rather the relying on AR.KR0. This should become more clear in future SAL
- * specs. We'll fall back to getting it out of AR.KR0 if no appropriate entry is
- * found in the memory map.
+ * ar.k0 is not involved in kernel I/O port accesses, which can use
+ * any of the I/O port spaces and are done via MMIO using the
+ * virtual mmio_base from the appropriate io_space[].
*/
phys_iobase = efi_get_iobase();
- if (phys_iobase)
- /* set AR.KR0 since this is all we use it for anyway */
- ia64_set_kr(IA64_KR_IO_BASE, phys_iobase);
- else {
+ if (!phys_iobase) {
phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
- printk(KERN_INFO "No I/O port range found in EFI memory map, falling back "
- "to AR.KR0\n");
- printk(KERN_INFO "I/O port base = 0x%lx\n", phys_iobase);
+ printk(KERN_INFO "No I/O port range found in EFI memory map, "
+ "falling back to AR.KR0 (0x%lx)\n", phys_iobase);
}
ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
+ ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
/* setup legacy IO port space */
io_space[0].mmio_base = ia64_iobase;
num_io_spaces = 1;
}
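+/*
+ * With this in place, kernel inX()/outX() accesses to legacy ports turn
+ * into uncached loads/stores relative to io_space[0].mmio_base; see
+ * __ia64_mk_io_addr() in asm/io.h for the exact port-to-address encoding.
+ */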
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-static void __init
-setup_serial_legacy (void)
-{
- struct uart_port port;
- unsigned int i, iobase[] = {0x3f8, 0x2f8};
-
- printk(KERN_INFO "Registering legacy COM ports for serial console\n");
- memset(&port, 0, sizeof(port));
- port.iotype = SERIAL_IO_PORT;
- port.uartclk = BASE_BAUD * 16;
- for (i = 0; i < ARRAY_SIZE(iobase); i++) {
- port.line = i;
- port.iobase = iobase[i];
- early_serial_setup(&port);
- }
-}
-#endif
-
/**
* early_console_setup - setup debugging console
*
* Returns non-zero if a console couldn't be setup.
*/
static inline int __init
-early_console_setup (void)
+early_console_setup (char *cmdline)
{
+ int earlycons = 0;
+
#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
{
extern int sn_serial_console_early_setup(void);
- if(!sn_serial_console_early_setup())
- return 0;
+ if (!sn_serial_console_early_setup())
+ earlycons++;
}
#endif
+#ifdef CONFIG_EFI_PCDP
+ if (!efi_setup_pcdp_console(cmdline))
+ earlycons++;
+#endif
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ if (!early_serial_console_init(cmdline))
+ earlycons++;
+#endif
- return -1;
+ return (earlycons) ? 0 : -1;
}
+static inline void
+mark_bsp_online (void)
+{
+#ifdef CONFIG_SMP
+ /* If we register an early console, allow CPU 0 to printk */
+ cpu_set(smp_processor_id(), cpu_online_map);
+#endif
+}
+
+#ifdef CONFIG_SMP
+static void
+check_for_logical_procs (void)
+{
+ pal_logical_to_physical_t info;
+ s64 status;
+
+ status = ia64_pal_logical_to_phys(0, &info);
+ if (status == -1) {
+ printk(KERN_INFO "No logical to physical processor mapping "
+ "available\n");
+ return;
+ }
+ if (status) {
+ printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
+ status);
+ return;
+ }
+ /*
+ * Total number of siblings that the BSP has, though not all of
+ * them may have booted successfully. The number that actually
+ * booted is in info.overview_num_log.
+ */
+ smp_num_siblings = info.overview_tpc;
+ smp_num_cpucores = info.overview_cpp;
+}
+#endif
+
void __init
setup_arch (char **cmdline_p)
{
io_port_init();
#ifdef CONFIG_IA64_GENERIC
- machvec_init(acpi_get_sysname());
+ {
+ const char *mvec_name = strstr (*cmdline_p, "machvec=");
+ char str[64];
+
+ if (mvec_name) {
+ const char *end;
+ size_t len;
+
+ mvec_name += 8;
+ end = strchr (mvec_name, ' ');
+ if (end)
+ len = end - mvec_name;
+ else
+ len = strlen (mvec_name);
+ len = min(len, sizeof (str) - 1);
+ strncpy (str, mvec_name, len);
+ str[len] = '\0';
+ mvec_name = str;
+ } else
+ mvec_name = acpi_get_sysname();
+ machvec_init(mvec_name);
+ }
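+ /*
+ * For example, booting a generic kernel with "machvec=dig" on the
+ * command line selects the DIG machine vector instead of the one
+ * derived from the ACPI system tables via acpi_get_sysname().
+ */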
#endif
-#ifdef CONFIG_SMP
- /* If we register an early console, allow CPU 0 to printk */
- if (!early_console_setup())
- cpu_set(smp_processor_id(), cpu_online_map);
-#endif
+ if (early_console_setup(*cmdline_p) == 0)
+ mark_bsp_online();
-#ifdef CONFIG_ACPI_BOOT
+ parse_early_param();
+#ifdef CONFIG_ACPI
/* Initialize the ACPI boot-time table parser */
acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
/* process SAL system table: */
ia64_sal_init(efi.sal_systab);
+ ia64_setup_printk_clock();
+
#ifdef CONFIG_SMP
cpu_physical_id(0) = hard_smp_processor_id();
+
+ cpu_set(0, cpu_sibling_map[0]);
+ cpu_set(0, cpu_core_map[0]);
+
+ check_for_logical_procs();
+ if (smp_num_cpucores > 1)
+ printk(KERN_INFO
+ "cpu package is Multi-Core capable: number of cores=%d\n",
+ smp_num_cpucores);
+ if (smp_num_siblings > 1)
+ printk(KERN_INFO
+ "cpu package is Multi-Threading capable: number of siblings=%d\n",
+ smp_num_siblings);
#endif
cpu_init(); /* initialize the bootstrap CPU */
+ mmu_context_init(); /* initialize context_id bitmap */
-#ifdef CONFIG_ACPI_BOOT
+#ifdef CONFIG_ACPI
acpi_boot_init();
#endif
-#ifdef CONFIG_EFI_PCDP
- efi_setup_pcdp_console(*cmdline_p);
-#endif
-#ifdef CONFIG_SERIAL_8250_CONSOLE
- if (!efi.hcdp)
- setup_serial_legacy();
-#endif
#ifdef CONFIG_VT
if (!conswitchp) {
/* enable IA-64 Machine Check Abort Handling unless disabled */
if (!strstr(saved_command_line, "nomca"))
ia64_mca_init();
-
+
platform_setup(cmdline_p);
paging_init();
}
char family[32], features[128], *cp, sep;
struct cpuinfo_ia64 *c = v;
unsigned long mask;
+ unsigned long proc_freq;
int i;
mask = c->features;
sprintf(cp, " 0x%lx", mask);
}
+ proc_freq = cpufreq_quick_get(cpunum);
+ if (!proc_freq)
+ proc_freq = c->proc_freq / 1000;
+
seq_printf(m,
"processor : %d\n"
"vendor : %s\n"
"cpu regs : %u\n"
"cpu MHz : %lu.%06lu\n"
"itc MHz : %lu.%06lu\n"
- "BogoMIPS : %lu.%02lu\n\n",
+ "BogoMIPS : %lu.%02lu\n",
cpunum, c->vendor, family, c->model, c->revision, c->archrev,
features, c->ppn, c->number,
- c->proc_freq / 1000000, c->proc_freq % 1000000,
+ proc_freq / 1000, proc_freq % 1000,
c->itc_freq / 1000000, c->itc_freq % 1000000,
lpj*HZ/500000, (lpj*HZ/5000) % 100);
+#ifdef CONFIG_SMP
+ seq_printf(m, "siblings : %u\n", cpus_weight(cpu_core_map[cpunum]));
+ if (c->threads_per_core > 1 || c->cores_per_socket > 1)
+ seq_printf(m,
+ "physical id: %u\n"
+ "core id : %u\n"
+ "thread id : %u\n",
+ c->socket_id, c->core_id, c->thread_id);
+#endif
+ seq_printf(m,"\n");
+
return 0;
}
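+/*
+ * With the additions above, /proc/cpuinfo also reports the package
+ * topology, e.g. (example values only):
+ *
+ *	cpu MHz    : 1500.000
+ *	siblings   : 2
+ *	physical id: 0
+ *	core id    : 0
+ *	thread id  : 1
+ */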
memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
c->cpu = smp_processor_id();
+
+ /* The default values below will be overwritten by identify_siblings()
+ * for Multi-Threading/Multi-Core capable CPUs.
+ */
+ c->threads_per_core = c->cores_per_socket = c->num_log = 1;
+ c->socket_id = -1;
+
+ identify_siblings(c);
#endif
c->ppn = cpuid.field.ppn;
c->number = cpuid.field.number;
setup_per_cpu_areas (void)
{
/* start_kernel() requires this... */
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+ prefill_possible_map();
+#endif
}
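+/*
+ * prefill_possible_map() sets extra bits in cpu_possible_map so that CPUs
+ * which may be hot-added later already have their per-CPU bookkeeping
+ * allocated up front.
+ */
+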
+/*
+ * Calculate the max. cache line size.
+ *
+ * In addition, the minimum of the i-cache stride sizes is calculated for
+ * "flush_icache_range()".
+ */
static void
get_max_cacheline_size (void)
{
unsigned long line_size, max = 1;
+ unsigned int cache_size = 0;
u64 l, levels, unique_caches;
pal_cache_config_info_t cci;
s64 status;
printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
__FUNCTION__, status);
max = SMP_CACHE_BYTES;
+ /* Safest setup for "flush_icache_range()" */
+ ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
goto out;
}
&cci);
if (status != 0) {
printk(KERN_ERR
- "%s: ia64_pal_cache_config_info(l=%lu) failed (status=%ld)\n",
+ "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
__FUNCTION__, l, status);
max = SMP_CACHE_BYTES;
+ /* The safest setup for "flush_icache_range()" */
+ cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
+ cci.pcci_unified = 1;
}
line_size = 1 << cci.pcci_line_size;
if (line_size > max)
max = line_size;
- }
+ if (cache_size < cci.pcci_cache_size)
+ cache_size = cci.pcci_cache_size;
+ if (!cci.pcci_unified) {
+ status = ia64_pal_cache_config_info(l,
+ /* cache_type (instruction)= */ 1,
+ &cci);
+ if (status != 0) {
+ printk(KERN_ERR
+ "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
+ __FUNCTION__, l, status);
+ /* The safest setup for "flush_icache_range()" */
+ cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
+ }
+ }
+ if (cci.pcci_stride < ia64_i_cache_stride_shift)
+ ia64_i_cache_stride_shift = cci.pcci_stride;
+ }
out:
+#ifdef CONFIG_SMP
+ max_cache_size = max(max_cache_size, cache_size);
+#endif
if (max > ia64_max_cacheline_size)
ia64_max_cacheline_size = max;
}
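+/*
+ * Example: if every cache level reports pcci_stride = 6, flush_icache_range()
+ * steps 64 bytes at a time; if a PAL query fails, we fall back to the
+ * conservative 32-byte stride (I_CACHE_STRIDE_SHIFT) defined above.
+ */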
cpu_data = per_cpu_init();
+ /*
+ * We set ar.k3 so that assembly code in MCA handler can compute
+ * physical addresses of per cpu variables with a simple:
+ * phys = ar.k3 + &per_cpu_var
+ */
+ ia64_set_kr(IA64_KR_PER_CPU_DATA,
+ ia64_tpa(cpu_data) - (long) __per_cpu_start);
+
get_max_cacheline_size();
/*
#endif
/* Clear the stack memory reserved for pt_regs: */
- memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
+ memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
ia64_set_kr(IA64_KR_FPU_OWNER, 0);
/*
- * Initialize default control register to defer all speculative faults. The
+ * Initialize the page-table base register to a global
+ * directory with all zeroes. This ensures that we can handle
+ * TLB misses to the user address space even before the first
+ * user address space has been created. This may happen, e.g., due to
+ * aggressive use of lfetch.fault.
+ */
+ ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));
+
+ /*
+ * Initialize default control register to defer speculative faults except
+ * for those arising from TLB misses, which are not deferred. The
* kernel MUST NOT depend on a particular setting of these bits (in other words,
* the kernel must have recovery code for all speculative accesses). Turn on
* dcr.lc as per recommendation by the architecture team. Most IA-32 apps
BUG();
ia64_mmu_init(ia64_imva(cpu_data));
+ ia64_mca_cpu_init(ia64_imva(cpu_data));
#ifdef CONFIG_IA32_SUPPORT
ia32_cpu_init();
break;
}
- if (ia64_pal_rse_info(&num_phys_stacked, 0) != 0) {
+ if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
"stacked regs\n");
num_phys_stacked = 96;
/* size of physical stacked register partition plus 8 bytes: */
__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
platform_cpu_init();
+ pm_idle = default_idle;
+}
+
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+void sched_cacheflush(void)
+{
+ ia64_sal_cache_flush(3);
}
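+/*
+ * The argument selects which caches SAL_CACHE_FLUSH operates on; 3 asks
+ * firmware to flush both the instruction and data caches (see the SAL
+ * spec), which is as much of the cache hierarchy as we can ask for here.
+ */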
void