#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
+#include <linux/profile.h>
#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
-#include <asm/hardirq.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
+#include <asm/tlb.h>
extern int linux_num_cpus;
extern void calibrate_delay(void);
void __init smp_callin(void)
{
int cpuid = hard_smp_processor_id();
- extern int bigkernel;
- extern unsigned long kern_locked_tte_data;
-
- if (bigkernel) {
- prom_dtlb_load(sparc64_highest_locked_tlbent()-1,
- kern_locked_tte_data + 0x400000, KERNBASE + 0x400000);
- prom_itlb_load(sparc64_highest_locked_tlbent()-1,
- kern_locked_tte_data + 0x400000, KERNBASE + 0x400000);
- }
inherit_locked_prom_mappings(0);
#define NUM_ROUNDS 64 /* magic value */
#define NUM_ITERS 5 /* likewise */
-static spinlock_t itc_sync_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];
#define DEBUG_TICK_SYNC 0
struct task_struct *p;
int timeout, ret, cpu_node;
- kernel_thread(NULL, NULL, CLONE_IDLETASK);
-
- p = prev_task(&init_task);
-
- init_idle(p, cpu);
-
- unhash_process(p);
-
+ p = fork_idle(cpu);
callin_flag = 0;
cpu_new_thread = p->thread_info;
cpu_set(cpu, cpu_callout_map);
int i;
__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_isset(i, mask)) {
- spitfire_xcall_helper(data0, data1, data2, pstate, i);
- cpu_clear(i, mask);
- if (cpus_empty(mask))
- break;
- }
- }
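+ /* Spitfire has no dispatch pipelining, so deliver the mondo to each
+  * cpu remaining in the mask one at a time.
+  */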
+ for_each_cpu_mask(i, mask)
+ spitfire_xcall_helper(data0, data1, data2, pstate, i);
}
/* Cheetah now allows us to send the whole 64 bytes of data in the interrupt
* packet, but we have no use for that. However, we do take advantage of
* the new pipelining feature (i.e. dispatch to multiple cpus simultaneously).
*/
-#if NR_CPUS > 32
-#error Fixup cheetah_xcall_deliver Dave...
-#endif
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
u64 pstate, ver;
nack_busy_id = 0;
{
- cpumask_t work_mask = mask;
int i;
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_isset(i, work_mask)) {
- u64 target = (i << 14) | 0x70;
-
- if (!is_jalapeno)
- target |= (nack_busy_id << 24);
- __asm__ __volatile__(
- "stxa %%g0, [%0] %1\n\t"
- "membar #Sync\n\t"
- : /* no outputs */
- : "r" (target), "i" (ASI_INTR_W));
- nack_busy_id++;
- cpu_clear(i, work_mask);
- if (cpus_empty(work_mask))
- break;
- }
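+ /* Queue a dispatch to each target cpu by writing its interrupt
+  * dispatch register through ASI_INTR_W; Cheetah lets these
+  * dispatches pipeline back to back.
+  */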
+ for_each_cpu_mask(i, mask) {
+ u64 target = (i << 14) | 0x70;
+
+ if (!is_jalapeno)
+ target |= (nack_busy_id << 24);
+ __asm__ __volatile__(
+ "stxa %%g0, [%0] %1\n\t"
+ "membar #Sync\n\t"
+ : /* no outputs */
+ : "r" (target), "i" (ASI_INTR_W));
+ nack_busy_id++;
}
}
printk("CPU[%d]: mondo stuckage result[%016lx]\n",
smp_processor_id(), dispatch_stat);
} else {
- cpumask_t work_mask = mask;
int i, this_busy_nack = 0;
/* Delay some random time with interrupts enabled
 * to prevent deadlock.
 */
/* Clear out the mask bits for cpus which did not
* NACK us.
*/
- for (i = 0; i < NR_CPUS; i++) {
- if (cpu_isset(i, work_mask)) {
- u64 check_mask;
-
- if (is_jalapeno)
- check_mask = (0x2UL << (2*i));
- else
- check_mask = (0x2UL <<
- this_busy_nack);
- if ((dispatch_stat & check_mask) == 0)
- cpu_clear(i, mask);
- this_busy_nack += 2;
- cpu_clear(i, work_mask);
- if (cpus_empty(work_mask))
- break;
- }
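+ /* Jalapeno reports NACK status indexed by cpu number, while other
+  * Cheetah variants index it by dispatch slot (this_busy_nack).
+  */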
+ for_each_cpu_mask(i, mask) {
+ u64 check_mask;
+
+ if (is_jalapeno)
+ check_mask = (0x2UL << (2*i));
+ else
+ check_mask = (0x2UL <<
+ this_busy_nack);
+ if ((dispatch_stat & check_mask) == 0)
+ cpu_clear(i, mask);
+ this_busy_nack += 2;
}
goto retry;
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
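+ /* get_cpu() disables preemption, keeping this_cpu valid until the
+  * matching put_cpu() below.
+  */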
+ int this_cpu = get_cpu();
cpus_and(mask, mask, cpu_online_map);
- cpu_clear(smp_processor_id(), mask);
+ cpu_clear(this_cpu, mask);
if (tlb_type == spitfire)
spitfire_xcall_deliver(data0, data1, data2, mask);
else
cheetah_xcall_deliver(data0, data1, data2, mask);
/* NOTE: Caller runs local copy on master. */
+
+ put_cpu();
}
extern unsigned long xcall_sync_tick;
int wait;
};
-static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;
extern unsigned long xcall_call_function;
if (!cpus)
return 0;
+ /* Can deadlock when called with interrupts disabled */
+ WARN_ON(irqs_disabled());
+
data.func = func;
data.info = info;
atomic_set(&data.finished, 0);
}
}
-extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_mm;
-extern unsigned long xcall_flush_tlb_range;
+extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_flush_tlb_all_spitfire;
extern unsigned long xcall_flush_tlb_all_cheetah;
static __inline__ void __local_flush_dcache_page(struct page *page)
{
#if (L1DCACHE_SIZE > PAGE_SIZE)
- __flush_dcache_page(page->virtual,
+ __flush_dcache_page(page_address(page),
((tlb_type == spitfire) &&
page_mapping(page) != NULL));
#else
if (page_mapping(page) != NULL &&
tlb_type == spitfire)
- __flush_icache_page(__pa(page->virtual));
+ __flush_icache_page(__pa(page_address(page)));
#endif
}
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
cpumask_t mask = cpumask_of_cpu(cpu);
+ int this_cpu = get_cpu();
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes);
#endif
- if (cpu == smp_processor_id()) {
+ if (cpu == this_cpu) {
__local_flush_dcache_page(page);
} else if (cpu_online(cpu)) {
+ void *pg_addr = page_address(page);
u64 data0;
if (tlb_type == spitfire) {
if (page_mapping(page) != NULL)
data0 |= ((u64)1 << 32);
spitfire_xcall_deliver(data0,
- __pa(page->virtual),
- (u64) page->virtual,
+ __pa(pg_addr),
+ (u64) pg_addr,
mask);
} else {
data0 =
((u64)&xcall_flush_dcache_page_cheetah);
cheetah_xcall_deliver(data0,
- __pa(page->virtual),
+ __pa(pg_addr),
0, mask);
}
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes_xcall);
#endif
}
+
+ put_cpu();
}
void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
+ void *pg_addr = page_address(page);
cpumask_t mask = cpu_online_map;
u64 data0;
+ int this_cpu = get_cpu();
- cpu_clear(smp_processor_id(), mask);
+ cpu_clear(this_cpu, mask);
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_inc(&dcpage_flushes);
if (page_mapping(page) != NULL)
data0 |= ((u64)1 << 32);
spitfire_xcall_deliver(data0,
- __pa(page->virtual),
- (u64) page->virtual,
+ __pa(pg_addr),
+ (u64) pg_addr,
mask);
} else {
data0 = ((u64)&xcall_flush_dcache_page_cheetah);
cheetah_xcall_deliver(data0,
- __pa(page->virtual),
+ __pa(pg_addr),
0, mask);
}
#ifdef CONFIG_DEBUG_DCFLUSH
#endif
flush_self:
__local_flush_dcache_page(page);
+
+ put_cpu();
}
void smp_receive_signal(int cpu)
{
u32 ctx = CTX_HWBITS(mm->context);
- int cpu = smp_processor_id();
+ int cpu = get_cpu();
if (atomic_read(&mm->mm_users) == 1) {
- /* See smp_flush_tlb_page for info about this. */
mm->cpu_vm_mask = cpumask_of_cpu(cpu);
goto local_flush_and_out;
}
local_flush_and_out:
__flush_tlb_mm(ctx, SECONDARY_CONTEXT);
+
+ put_cpu();
}
}
-void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
- unsigned long end)
+void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
u32 ctx = CTX_HWBITS(mm->context);
- int cpu = smp_processor_id();
-
- start &= PAGE_MASK;
- end = PAGE_ALIGN(end);
+ int cpu = get_cpu();
if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
mm->cpu_vm_mask = cpumask_of_cpu(cpu);
goto local_flush_and_out;
+ } else {
+ /* This optimization is not valid. Normally
+ * we will be holding the page_table_lock, but
+ * there is an exception which is copy_page_range()
+ * when forking. The lock is held during the individual
+ * page table updates in the parent, but not at the
+ * top level, which is where we are invoked.
+ */
+ if (0) {
+ cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
+
+ /* By virtue of running under the mm->page_table_lock,
+ * and mmu_context.h:switch_mm doing the same, the
+ * following operation is safe.
+ */
+ if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
+ goto local_flush_and_out;
+ }
}
- smp_cross_call_masked(&xcall_flush_tlb_range,
- ctx, start, end,
+ smp_cross_call_masked(&xcall_flush_tlb_pending,
+ ctx, nr, (unsigned long) vaddrs,
mm->cpu_vm_mask);
- local_flush_and_out:
- __flush_tlb_range(ctx, start, SECONDARY_CONTEXT,
- end, PAGE_SIZE, (end-start));
+local_flush_and_out:
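+ /* Flush the nr batched virtual addresses for this context from the
+  * local TLB.
+  */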
+ __flush_tlb_pending(ctx, nr, vaddrs);
+
+ put_cpu();
}
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
}
}
-void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
-{
- {
- u32 ctx = CTX_HWBITS(mm->context);
- int cpu = smp_processor_id();
-
- page &= PAGE_MASK;
- if (mm == current->active_mm &&
- atomic_read(&mm->mm_users) == 1) {
- /* By virtue of being the current address space, and
- * having the only reference to it, the following
- * operation is safe.
- *
- * It would not be a win to perform the xcall tlb
- * flush in this case, because even if we switch back
- * to one of the other processors in cpu_vm_mask it
- * is almost certain that all TLB entries for this
- * context will be replaced by the time that happens.
- */
- mm->cpu_vm_mask = cpumask_of_cpu(cpu);
- goto local_flush_and_out;
- } else {
- cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
-
- /* By virtue of running under the mm->page_table_lock,
- * and mmu_context.h:switch_mm doing the same, the
- * following operation is safe.
- */
- if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
- goto local_flush_and_out;
- }
-
- /* OK, we have to actually perform the cross call. Most
- * likely this is a cloned mm or kswapd is kicking out pages
- * for a task which has run recently on another cpu.
- */
- smp_cross_call_masked(&xcall_flush_tlb_page,
- ctx, page, 0,
- mm->cpu_vm_mask);
- if (!cpu_isset(cpu, mm->cpu_vm_mask))
- return;
-
- local_flush_and_out:
- __flush_tlb_page(ctx, page, SECONDARY_CONTEXT);
- }
-}
-
/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;
void smp_capture(void)
{
- int result = __atomic_add(1, &smp_capture_depth);
+ int result = atomic_add_ret(1, &smp_capture_depth);
- membar("#StoreStore | #LoadStore");
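+ /* Only the first capture request (result == 1) sends the capture
+  * cross-call; nested captures just bump the depth count.
+  */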
if (result == 1) {
int ncpus = num_online_cpus();
smp_cross_call(&xcall_promstop, 0, 0, 0);
}
-extern void sparc64_do_profile(struct pt_regs *regs);
-
#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
#define prof_counter(__cpu) cpu_data(__cpu).counter
}
do {
- sparc64_do_profile(regs);
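+ /* Feed this tick into the generic kernel profiling code. */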
+ profile_tick(CPU_PROFILING, regs);
if (!--prof_counter(cpu)) {
irq_enter();
boot_cpu_id = hard_smp_processor_id();
current_tick_offset = timer_tick_offset;
- if (boot_cpu_id >= NR_CPUS) {
- prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
- prom_halt();
- }
-
cpu_set(boot_cpu_id, cpu_online_map);
prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}
}
/* /proc/profile writes can call this, don't __init it please. */
-static spinlock_t prof_setup_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(prof_setup_lock);
int setup_profiling_timer(unsigned int multiplier)
{
void __devinit smp_prepare_boot_cpu(void)
{
+ if (hard_smp_processor_id() >= NR_CPUS) {
+ prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
+ prom_halt();
+ }
+
current_thread_info()->cpu = hard_smp_processor_id();
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), phys_cpu_present_map);