* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
+#include <linux/bootmem.h>
#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
-extern int linux_num_cpus;
extern void calibrate_delay(void);
/* Please don't make this stuff initdata!!! --DaveM */
static unsigned char boot_cpu_id;
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
+cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
+cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;
cpu_data(id).pgcache_size = 0;
cpu_data(id).pte_cache[0] = NULL;
cpu_data(id).pte_cache[1] = NULL;
- cpu_data(id).pgdcache_size = 0;
cpu_data(id).pgd_cache = NULL;
cpu_data(id).idle_volume = 1;
+
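+	/* Pull this cpu's cache geometry from the PROM device tree;
+	 * prom_getintdefault() falls back to the given default when
+	 * a property is missing.
+	 */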
+ cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
+ 16 * 1024);
+ cpu_data(id).dcache_line_size =
+ prom_getintdefault(cpu_node, "dcache-line-size", 32);
+ cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
+ 16 * 1024);
+ cpu_data(id).icache_line_size =
+ prom_getintdefault(cpu_node, "icache-line-size", 32);
+ cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
+ 4 * 1024 * 1024);
+ cpu_data(id).ecache_line_size =
+ prom_getintdefault(cpu_node, "ecache-line-size", 64);
+ printk("CPU[%d]: Caches "
+ "D[sz(%d):line_sz(%d)] "
+ "I[sz(%d):line_sz(%d)] "
+ "E[sz(%d):line_sz(%d)]\n",
+ id,
+ cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
+ cpu_data(id).icache_size, cpu_data(id).icache_line_size,
+ cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
}
static void smp_setup_percpu_timer(void);
extern void inherit_locked_prom_mappings(int save_p);
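+
+/* Load this cpu's per-cpu offset into %g5 and into the IMMU TSB
+ * register, which the trap entry/exit code uses to reload %g5.
+ */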
+static inline void cpu_setup_percpu_base(unsigned long cpu_id)
+{
+ __asm__ __volatile__("mov %0, %%g5\n\t"
+ "stxa %0, [%1] %2\n\t"
+ "membar #Sync"
+ : /* no outputs */
+ : "r" (__per_cpu_offset(cpu_id)),
+ "r" (TSB_REG), "i" (ASI_IMMU));
+}
+
void __init smp_callin(void)
{
int cpuid = hard_smp_processor_id();
__flush_tlb_all();
+ cpu_setup_percpu_base(cpuid);
+
smp_setup_percpu_timer();
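+	/* Enable the P-cache on this cpu if it has been forced on. */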
+ if (cheetah_pcache_forced_on)
+ cheetah_enable_pcache();
+
local_irq_enable();
calibrate_delay();
/* Clear this or we will die instantly when we
* schedule back to this idler...
*/
- clear_thread_flag(TIF_NEWCHILD);
+ current_thread_info()->new_child = 0;
/* Attach to the address space of init_task. */
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
while (!cpu_isset(cpuid, smp_commenced_mask))
- membar("#LoadLoad");
+ rmb();
cpu_set(cpuid, cpu_online_map);
+
+ /* idle thread is expected to have preempt disabled */
+ preempt_disable();
}
void cpu_panic(void)
panic("SMP bolixed\n");
}
-static unsigned long current_tick_offset;
+static unsigned long current_tick_offset __read_mostly;
/* This tick register synchronization scheme is taken entirely from
* the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
#define NUM_ROUNDS 64 /* magic value */
#define NUM_ITERS 5 /* likewise */
-static spinlock_t itc_sync_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];
#define DEBUG_TICK_SYNC 0
for (i = 0; i < NUM_ITERS; i++) {
t0 = tick_ops->get_tick();
go[MASTER] = 1;
- membar("#StoreLoad");
+ membar_storeload();
while (!(tm = go[SLAVE]))
- membar("#LoadLoad");
+ rmb();
go[SLAVE] = 0;
- membar("#StoreStore");
+ wmb();
t1 = tick_ops->get_tick();
if (t1 - t0 < best_t1 - best_t0)
go[MASTER] = 1;
while (go[MASTER])
- membar("#LoadLoad");
+ rmb();
local_irq_save(flags);
{
/* wait for client to be ready */
while (!go[MASTER])
- membar("#LoadLoad");
+ rmb();
/* now let the client proceed into his loop */
go[MASTER] = 0;
- membar("#StoreLoad");
+ membar_storeload();
spin_lock_irqsave(&itc_sync_lock, flags);
{
for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
while (!go[MASTER])
- membar("#LoadLoad");
+ rmb();
go[MASTER] = 0;
- membar("#StoreStore");
+ wmb();
go[SLAVE] = tick_ops->get_tick();
- membar("#StoreLoad");
+ membar_storeload();
}
}
spin_unlock_irqrestore(&itc_sync_lock, flags);
p = fork_idle(cpu);
callin_flag = 0;
- cpu_new_thread = p->thread_info;
+ cpu_new_thread = task_thread_info(p);
cpu_set(cpu, cpu_callout_map);
cpu_find_by_mid(cpu, &cpu_node);
int wait;
};
-static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;
extern unsigned long xcall_call_function;
extern unsigned long xcall_flush_tlb_all_cheetah;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
+
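+/* The cheetah D-cache flush cross-call is only needed when D-cache
+ * aliasing is possible (the old L1DCACHE_SIZE > PAGE_SIZE test).
+ */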
+#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
+#endif
extern unsigned long xcall_flush_dcache_page_spitfire;
#ifdef CONFIG_DEBUG_DCFLUSH
static __inline__ void __local_flush_dcache_page(struct page *page)
{
-#if (L1DCACHE_SIZE > PAGE_SIZE)
+#ifdef DCACHE_ALIASING_POSSIBLE
__flush_dcache_page(page_address(page),
((tlb_type == spitfire) &&
page_mapping(page) != NULL));
(u64) pg_addr,
mask);
} else {
+#ifdef DCACHE_ALIASING_POSSIBLE
data0 =
((u64)&xcall_flush_dcache_page_cheetah);
cheetah_xcall_deliver(data0,
__pa(pg_addr),
0, mask);
+#endif
}
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes_xcall);
(u64) pg_addr,
mask);
} else {
+#ifdef DCACHE_ALIASING_POSSIBLE
data0 = ((u64)&xcall_flush_dcache_page_cheetah);
cheetah_xcall_deliver(data0,
__pa(pg_addr),
0, mask);
+#endif
}
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes_xcall);
* questionable (in theory the big win for threads is the massive sharing of
* address space state across processors).
*/
+
+/* This currently is only used by the hugetlb arch pre-fault
+ * hook on UltraSPARC-III+ and later when changing the pagesize
+ * bits of the context register for an address space.
+ */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
- /*
- * This code is called from two places, dup_mmap and exit_mmap. In the
- * former case, we really need a flush. In the later case, the callers
- * are single threaded exec_mmap (really need a flush), multithreaded
- * exec_mmap case (do not need to flush, since the caller gets a new
- * context via activate_mm), and all other callers of mmput() whence
- * the flush can be optimized since the associated threads are dead and
- * the mm is being torn down (__exit_mm and other mmput callers) or the
- * owning thread is dissociating itself from the mm. The
- * (atomic_read(&mm->mm_users) == 0) check ensures real work is done
- * for single thread exec and dup_mmap cases. An alternate check might
- * have been (current->mm != mm).
- * Kanoj Sarcar
- */
- if (atomic_read(&mm->mm_users) == 0)
- return;
-
- {
- u32 ctx = CTX_HWBITS(mm->context);
- int cpu = get_cpu();
+ u32 ctx = CTX_HWBITS(mm->context);
+ int cpu = get_cpu();
- if (atomic_read(&mm->mm_users) == 1) {
- mm->cpu_vm_mask = cpumask_of_cpu(cpu);
- goto local_flush_and_out;
- }
+ if (atomic_read(&mm->mm_users) == 1) {
+ mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+ goto local_flush_and_out;
+ }
- smp_cross_call_masked(&xcall_flush_tlb_mm,
- ctx, 0, 0,
- mm->cpu_vm_mask);
+ smp_cross_call_masked(&xcall_flush_tlb_mm,
+ ctx, 0, 0,
+ mm->cpu_vm_mask);
- local_flush_and_out:
- __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
+local_flush_and_out:
+ __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
- put_cpu();
- }
+ put_cpu();
}
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
u32 ctx = CTX_HWBITS(mm->context);
int cpu = get_cpu();
- if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
+ if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
mm->cpu_vm_mask = cpumask_of_cpu(cpu);
- goto local_flush_and_out;
- } else {
- /* This optimization is not valid. Normally
- * we will be holding the page_table_lock, but
- * there is an exception which is copy_page_range()
- * when forking. The lock is held during the individual
- * page table updates in the parent, but not at the
- * top level, which is where we are invoked.
- */
- if (0) {
- cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
-
- /* By virtue of running under the mm->page_table_lock,
- * and mmu_context.h:switch_mm doing the same, the
- * following operation is safe.
- */
- if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
- goto local_flush_and_out;
- }
- }
-
- smp_cross_call_masked(&xcall_flush_tlb_pending,
- ctx, nr, (unsigned long) vaddrs,
- mm->cpu_vm_mask);
+ else
+ smp_cross_call_masked(&xcall_flush_tlb_pending,
+ ctx, nr, (unsigned long) vaddrs,
+ mm->cpu_vm_mask);
-local_flush_and_out:
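+
+	/* The local flush always runs for the calling cpu. */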
__flush_tlb_pending(ctx, nr, vaddrs);
put_cpu();
void smp_capture(void)
{
- int result = __atomic_add(1, &smp_capture_depth);
+ int result = atomic_add_ret(1, &smp_capture_depth);
- membar("#StoreStore | #LoadStore");
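+	/* atomic_add_ret() returns a value and therefore implies the
+	 * memory barriers the old explicit membar provided.
+	 */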
if (result == 1) {
int ncpus = num_online_cpus();
smp_processor_id());
#endif
penguins_are_doing_time = 1;
- membar("#StoreStore | #LoadStore");
+ membar_storestore_loadstore();
atomic_inc(&smp_capture_registry);
smp_cross_call(&xcall_capture, 0, 0, 0);
while (atomic_read(&smp_capture_registry) != ncpus)
- membar("#LoadLoad");
+ rmb();
#ifdef CAPTURE_DEBUG
printk("done\n");
#endif
smp_processor_id());
#endif
penguins_are_doing_time = 0;
- membar("#StoreStore | #StoreLoad");
+ membar_storeload_storestore();
atomic_dec(&smp_capture_registry);
}
}
save_alternate_globals(global_save);
prom_world(1);
atomic_inc(&smp_capture_registry);
- membar("#StoreLoad | #StoreStore");
+ membar_storeload_storestore();
while (penguins_are_doing_time)
- membar("#LoadLoad");
+ rmb();
restore_alternate_globals(global_save);
atomic_dec(&smp_capture_registry);
prom_world(0);
preempt_enable();
}
-extern unsigned long xcall_promstop;
-
-void smp_promstop_others(void)
-{
- smp_cross_call(&xcall_promstop, 0, 0, 0);
-}
-
#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
#define prof_counter(__cpu) cpu_data(__cpu).counter
prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
-cycles_t cacheflush_time;
-unsigned long cache_decay_ticks;
-
-extern unsigned long cheetah_tune_scheduling(void);
-
-static void __init smp_tune_scheduling(void)
-{
- unsigned long orig_flush_base, flush_base, flags, *p;
- unsigned int ecache_size, order;
- cycles_t tick1, tick2, raw;
- int cpu_node;
-
- /* Approximate heuristic for SMP scheduling. It is an
- * estimation of the time it takes to flush the L2 cache
- * on the local processor.
- *
- * The ia32 chooses to use the L1 cache flush time instead,
- * and I consider this complete nonsense. The Ultra can service
- * a miss to the L1 with a hit to the L2 in 7 or 8 cycles, and
- * L2 misses are what create extra bus traffic (ie. the "cost"
- * of moving a process from one cpu to another).
- */
- printk("SMP: Calibrating ecache flush... ");
- if (tlb_type == cheetah || tlb_type == cheetah_plus) {
- cacheflush_time = cheetah_tune_scheduling();
- goto report;
- }
-
- cpu_find_by_instance(0, &cpu_node, NULL);
- ecache_size = prom_getintdefault(cpu_node,
- "ecache-size", (512 * 1024));
- if (ecache_size > (4 * 1024 * 1024))
- ecache_size = (4 * 1024 * 1024);
- orig_flush_base = flush_base =
- __get_free_pages(GFP_KERNEL, order = get_order(ecache_size));
-
- if (flush_base != 0UL) {
- local_irq_save(flags);
-
- /* Scan twice the size once just to get the TLB entries
- * loaded and make sure the second scan measures pure misses.
- */
- for (p = (unsigned long *)flush_base;
- ((unsigned long)p) < (flush_base + (ecache_size<<1));
- p += (64 / sizeof(unsigned long)))
- *((volatile unsigned long *)p);
-
- tick1 = tick_ops->get_tick();
-
- __asm__ __volatile__("1:\n\t"
- "ldx [%0 + 0x000], %%g1\n\t"
- "ldx [%0 + 0x040], %%g2\n\t"
- "ldx [%0 + 0x080], %%g3\n\t"
- "ldx [%0 + 0x0c0], %%g5\n\t"
- "add %0, 0x100, %0\n\t"
- "cmp %0, %2\n\t"
- "bne,pt %%xcc, 1b\n\t"
- " nop"
- : "=&r" (flush_base)
- : "0" (flush_base),
- "r" (flush_base + ecache_size)
- : "g1", "g2", "g3", "g5");
-
- tick2 = tick_ops->get_tick();
-
- local_irq_restore(flags);
-
- raw = (tick2 - tick1);
-
- /* Dampen it a little, considering two processes
- * sharing the cache and fitting.
- */
- cacheflush_time = (raw - (raw >> 2));
-
- free_pages(orig_flush_base, order);
- } else {
- cacheflush_time = ((ecache_size << 2) +
- (ecache_size << 1));
- }
-report:
- /* Convert ticks/sticks to jiffies. */
- cache_decay_ticks = cacheflush_time / timer_tick_offset;
- if (cache_decay_ticks < 1)
- cache_decay_ticks = 1;
-
- printk("Using heuristic of %ld cycles, %ld ticks.\n",
- cacheflush_time, cache_decay_ticks);
-}
-
/* /proc/profile writes can call this, don't __init it please. */
-static spinlock_t prof_setup_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(prof_setup_lock);
int setup_profiling_timer(unsigned int multiplier)
{
return 0;
}
+/* Constrain the number of cpus to max_cpus. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int instance, mid;
-
- instance = 0;
- while (!cpu_find_by_instance(instance, NULL, &mid)) {
- if (mid < max_cpus)
- cpu_set(mid, phys_cpu_present_map);
- instance++;
- }
-
if (num_possible_cpus() > max_cpus) {
+ int instance, mid;
+
instance = 0;
while (!cpu_find_by_instance(instance, NULL, &mid)) {
if (mid != boot_cpu_id) {
smp_store_cpu_info(boot_cpu_id);
}
+/* Set this up early so that things like the scheduler can init
+ * properly. We use the same cpu mask for both the present and
+ * possible cpu map.
+ */
+void __init smp_setup_cpu_possible_map(void)
+{
+ int instance, mid;
+
+ instance = 0;
+ while (!cpu_find_by_instance(instance, NULL, &mid)) {
+ if (mid < NR_CPUS)
+ cpu_set(mid, phys_cpu_present_map);
+ instance++;
+ }
+}
+
void __devinit smp_prepare_boot_cpu(void)
{
if (hard_smp_processor_id() >= NR_CPUS) {
}
current_thread_info()->cpu = hard_smp_processor_id();
+
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), phys_cpu_present_map);
}
(long) num_online_cpus(),
bogosum/(500000/HZ),
(bogosum/(5000/HZ))%100);
-
- /* We want to run this with all the other cpus spinning
- * in the kernel.
- */
- smp_tune_scheduling();
}
-/* This needn't do anything as we do not sleep the cpu
- * inside of the idler task, so an interrupt is not needed
- * to get a clean fast response.
- *
- * XXX Reverify this assumption... -DaveM
- *
- * Addendum: We do want it to do something for the signal
- * delivery case, we detect that by just seeing
- * if we are trying to send this to an idler or not.
- */
void smp_send_reschedule(int cpu)
{
- if (cpu_data(cpu).idle_volume == 0)
- smp_receive_signal(cpu);
+ smp_receive_signal(cpu);
}
/* This is a nop because we capture all other cpus
{
}
+unsigned long __per_cpu_base __read_mostly;
+unsigned long __per_cpu_shift __read_mostly;
+
+EXPORT_SYMBOL(__per_cpu_base);
+EXPORT_SYMBOL(__per_cpu_shift);
+
+void __init setup_per_cpu_areas(void)
+{
+ unsigned long goal, size, i;
+ char *ptr;
+ /* Created by linker magic */
+ extern char __per_cpu_start[], __per_cpu_end[];
+
+ /* Copy section for each CPU (we discard the original) */
+ goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
+
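+	/* Leave room for per-cpu data declared by loadable modules. */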
+#ifdef CONFIG_MODULES
+ if (goal < PERCPU_ENOUGH_ROOM)
+ goal = PERCPU_ENOUGH_ROOM;
+#endif
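+	/* Use a power-of-two area size so a cpu's offset can be formed
+	 * with a shift rather than a multiply.
+	 */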
+ __per_cpu_shift = 0;
+ for (size = 1UL; size < goal; size <<= 1UL)
+ __per_cpu_shift++;
+
+ /* Make sure the resulting __per_cpu_base value
+ * will fit in the 43-bit sign extended IMMU
+ * TSB register.
+ */
+ ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE,
+ (unsigned long) __per_cpu_start);
+
+ __per_cpu_base = ptr - __per_cpu_start;
+
+ if ((__per_cpu_shift < PAGE_SHIFT) ||
+ (__per_cpu_base & ~PAGE_MASK) ||
+ (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) {
+ prom_printf("PER_CPU: Invalid layout, "
+ "ptr[%p] shift[%lx] base[%lx]\n",
+ ptr, __per_cpu_shift, __per_cpu_base);
+ prom_halt();
+ }
+
+ for (i = 0; i < NR_CPUS; i++, ptr += size)
+ memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+
+ /* Finally, load in the boot cpu's base value.
+ * We abuse the IMMU TSB register for trap handler
+ * entry and exit loading of %g5. That is why it
+ * has to be page aligned.
+ */
+ cpu_setup_percpu_base(hard_smp_processor_id());
+}