/*
 * SN2 Platform specific SMP Support
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mmzone.h>
#include <linux/module.h>

#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/tlb.h>
#include <asm/numa.h>
#include <asm/bitops.h>
#include <asm/hw_irq.h>
#include <asm/current.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/addrs.h>
#include <asm/sn/sn2/shub_mmr.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/rw_mmr.h>

void sn2_ptc_deadlock_recovery(unsigned long data0, unsigned long data1);

static spinlock_t sn2_global_ptc_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;

static unsigned long sn2_ptc_deadlock_count;
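
/*
 * Wait for the PIO writes issued by this cpu to complete: spin until
 * the SHUB pending write count drains, then return the final write
 * status word so the caller can test the deadlock bit.
 */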
static inline unsigned long
wait_piowc(void)
{
	volatile unsigned long *piows;
	unsigned long ws;

	piows = pda->pio_write_status_addr;
	do {
		cpu_relax();
	} while (((ws = *piows) & SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
		 SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK);
	return ws;
}
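
/*
 * Presumably reached via the platform tlb_migrate_finish hook when a
 * task migrates between cpus: if the mm being moved is the current
 * one, flush it rather than risk stale translations on the new cpu.
 */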
void
sn_tlb_migrate_finish(struct mm_struct *mm)
{
	if (mm == current->mm)
		flush_tlb_mm(mm);
}

/**
 * sn2_global_tlb_purge - globally purge translation cache of virtual address range
 * @start: start of virtual address range
 * @end: end of virtual address range
 * @nbits: specifies number of bytes to purge per instruction (num = 1<<(nbits & 0xfc))
 *
 * Purges the translation caches of all processors of the given virtual address
 * range.
 *
 * Note:
 *	- cpu_vm_mask is a bit mask that indicates which cpus have loaded the context.
 *	- cpu_vm_mask is converted into a nodemask of the nodes containing the
 *	  cpus in cpu_vm_mask.
 *	- if only one bit is set in cpu_vm_mask & it is the current cpu,
 *	  then only the local TLB needs to be flushed. This flushing can be done
 *	  using ptc.l. This is the common case & avoids the global spinlock.
 *	- if multiple cpus have loaded the context, then flushing has to be
 *	  done with ptc.g/MMRs under protection of the global ptc_lock.
 */
void
sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
{
	int i, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
	volatile unsigned long *ptc0, *ptc1;
	unsigned long flags = 0, data0, data1;
	struct mm_struct *mm = current->active_mm;
	short nasids[NR_NODES], nix;
	DECLARE_BITMAP(nodes_flushed, NR_NODES);

	bitmap_zero(nodes_flushed, NR_NODES);

	i = 0;

	for_each_cpu_mask(cpu, mm->cpu_vm_mask) {
		cnode = cpu_to_node(cpu);
		__set_bit(cnode, nodes_flushed);
		lcpu = cpu;
		i++;
	}

	preempt_disable();
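
	/*
	 * Illustrative arithmetic (an added example, not from the
	 * original comments): each ptc.l/ptc.ga transaction purges
	 * 1UL << nbits bytes, so with 16KB pages (nbits == 14) a 64KB
	 * range takes four trips around the purge loops below.
	 */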
	if (likely(i == 1 && lcpu == smp_processor_id())) {
		do {
			ia64_ptcl(start, nbits << 2);
			start += (1UL << nbits);
		} while (start < end);
		ia64_srlz_i();
		preempt_enable();
		return;
	}

	if (atomic_read(&mm->mm_users) == 1) {
		flush_tlb_mm(mm);
		preempt_enable();
		return;
	}

	nix = 0;
	for (cnode = find_first_bit(nodes_flushed, NR_NODES); cnode < NR_NODES;
	     cnode = find_next_bit(nodes_flushed, NR_NODES, ++cnode))
		nasids[nix++] = cnodeid_to_nasid(cnode);

	data0 = (1UL << SH_PTC_0_A_SHFT) |
		(nbits << SH_PTC_0_PS_SHFT) |
		((ia64_get_rr(start) >> 8) << SH_PTC_0_RID_SHFT) |
		(1UL << SH_PTC_0_START_SHFT);

	ptc0 = (volatile unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_0);
	ptc1 = (volatile unsigned long *)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_1);

	mynasid = smp_physical_node_id();

	spin_lock_irqsave(&sn2_global_ptc_lock, flags);

	do {
		data1 = start | (1UL << SH_PTC_1_START_SHFT);
		for (i = 0; i < nix; i++) {
			nasid = nasids[i];
			if (likely(nasid == mynasid)) {
				ia64_ptcga(start, nbits << 2);
				ia64_srlz_i();
			} else {
				ptc0 = CHANGE_NASID(nasid, ptc0);
				ptc1 = CHANGE_NASID(nasid, ptc1);
				pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
				flushed = 1;
			}
		}
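
		/*
		 * If the SHUB flagged a write deadlock while the PTC
		 * writes were draining, the transactions for this
		 * address must be replayed one node at a time by the
		 * recovery path.
		 */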
		if (flushed && (wait_piowc() & SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK_MASK)) {
			sn2_ptc_deadlock_recovery(data0, data1);
		}

		start += (1UL << nbits);

	} while (start < end);

	spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);

	preempt_enable();
}

/*
 * sn2_ptc_deadlock_recovery
 *
 * Recover from PTC deadlock conditions. Recovery requires stepping thru each
 * TLB flush transaction. The recovery sequence is somewhat tricky & is
 * coded in assembly language.
 */
void
sn2_ptc_deadlock_recovery(unsigned long data0, unsigned long data1)
{
	extern void sn2_ptc_deadlock_recovery_core(long *, long, long *, long, long *);
	int cnode, mycnode, nasid;
	long *ptc0, *ptc1, *piows;

	sn2_ptc_deadlock_count++;

	ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_0);
	ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_1);
	piows = (long *)pda->pio_write_status_addr;

	mycnode = numa_node_id();

	for (cnode = 0; cnode < numnodes; cnode++) {
		if (is_headless_node(cnode) || cnode == mycnode)
			continue;
		nasid = cnodeid_to_nasid(cnode);
		ptc0 = CHANGE_NASID(nasid, ptc0);
		ptc1 = CHANGE_NASID(nasid, ptc1);
		sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows);
	}
}
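
/*
 * Our reading of the loop above: the local node is skipped because its
 * purge was issued directly with ptc.ga rather than by MMR write, and
 * headless nodes carry no cpus whose TLBs would need recovery.
 */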

/**
 * sn_send_IPI_phys - send an IPI to a nasid and slice
 * @physid: physical cpuid to receive the interrupt.
 * @vector: command to send
 * @delivery_mode: delivery mechanism
 *
 * Sends an IPI (interprocessor interrupt) to the processor specified by
 * @physid.
 *
 * @delivery_mode can be one of the following:
 *
 * %IA64_IPI_DM_INT - pend an interrupt
 * %IA64_IPI_DM_PMI - pend a PMI
 * %IA64_IPI_DM_NMI - pend an NMI
 * %IA64_IPI_DM_INIT - pend an INIT interrupt
 */
void
sn_send_IPI_phys(long physid, int vector, int delivery_mode)
{
	long nasid, slice, val;
	unsigned long flags = 0;
	volatile long *p;

	nasid = cpu_physical_id_to_nasid(physid);
	slice = cpu_physical_id_to_slice(physid);

	p = (volatile long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
	val = (1UL << SH_IPI_INT_SEND_SHFT) |
		(physid << SH_IPI_INT_PID_SHFT) |
		((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
		((long)vector << SH_IPI_INT_IDX_SHFT) |
		(0x000feeUL << SH_IPI_INT_BASE_SHFT);
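
	/*
	 * 0xfee is, as best we can tell, the standard ia64 processor
	 * interrupt block base (0xfee00000 >> 12); the SHUB turns the
	 * MMR write below into an interrupt transaction aimed at the
	 * target cpu.
	 */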

	mb();
	if (enable_shub_wars_1_1()) {
		/* SHUB 1.1 workaround: serialize IPI MMR writes with PTC writes */
		spin_lock_irqsave(&sn2_global_ptc_lock, flags);
	}
	pio_phys_write_mmr(p, val);
	if (enable_shub_wars_1_1()) {
		spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
	}
}

EXPORT_SYMBOL(sn_send_IPI_phys);

/**
 * sn2_send_IPI - send an IPI to a processor
 * @cpuid: target of the IPI
 * @vector: command to send
 * @delivery_mode: delivery mechanism
 * @redirect: redirect the IPI?
 *
 * Sends an IPI (InterProcessor Interrupt) to the processor specified by
 * @cpuid. @vector specifies the command to send, while @delivery_mode can
 * be one of the following:
 *
 * %IA64_IPI_DM_INT - pend an interrupt
 * %IA64_IPI_DM_PMI - pend a PMI
 * %IA64_IPI_DM_NMI - pend an NMI
 * %IA64_IPI_DM_INIT - pend an INIT interrupt
 */
void
sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
{
	long physid;

	physid = cpu_physical_id(cpuid);

	sn_send_IPI_phys(physid, vector, delivery_mode);
}
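
/*
 * sn2_send_IPI is, as far as we can tell, wired up as the
 * platform_send_ipi machine-vector hook for sn2; @redirect is accepted
 * for interface compatibility with that hook but is not used here.
 */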