arch/ia64/sn/io/sn2/ml_SN_intr.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/topology.h>
#include <asm/sn/sgi.h>
#include <asm/sn/iograph.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/klconfig.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/xtalk/xtalk.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn2/shub_mmr_t.h>
#include <asm/sn/sn2/shubio.h>
#include <asm/sal.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn2/shub_mmr.h>
#include <asm/sn/pda.h>

extern irqpda_t *irqpdaindr;
extern cnodeid_t master_node_get(vertex_hdl_t vhdl);
extern nasid_t master_nasid;

/* Initialize some shub registers for interrupts, both IO and error. */
void intr_init_vecblk(cnodeid_t node)
{
        int                             nasid = cnodeid_to_nasid(node);
        sh_ii_int0_config_u_t           ii_int_config;
        cpuid_t                         cpu;
        cpuid_t                         cpu0, cpu1;
        sh_ii_int0_enable_u_t           ii_int_enable;
        sh_int_node_id_config_u_t       node_id_config;
        sh_local_int5_config_u_t        local5_config;
        sh_local_int5_enable_u_t        local5_enable;

        if (is_headless_node(node)) {
                struct ia64_sal_retval ret_stuff;
                int cnode;

                /* Retarget all interrupts on this node to the master node. */
                node_id_config.sh_int_node_id_config_regval = 0;
                node_id_config.sh_int_node_id_config_s.node_id = master_nasid;
                node_id_config.sh_int_node_id_config_s.id_sel = 1;
                HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_INT_NODE_ID_CONFIG),
                        node_id_config.sh_int_node_id_config_regval);
                cnode = nasid_to_cnodeid(master_nasid);
                cpu = first_cpu(node_to_cpumask(cnode));
                cpu = cpu_physical_id(cpu);
                SAL_CALL(ret_stuff, SN_SAL_REGISTER_CE, nasid, cpu, master_nasid, 0, 0, 0, 0);
                if (ret_stuff.status < 0)
                        printk("%s: SN_SAL_REGISTER_CE SAL_CALL failed\n", __FUNCTION__);
        } else {
                cpu = first_cpu(node_to_cpumask(node));
                cpu = cpu_physical_id(cpu);
        }

        /* Get the physical IDs of the cpus on this node. */
        cpu0 = nasid_slice_to_cpu_physical_id(nasid, 0);
        cpu1 = nasid_slice_to_cpu_physical_id(nasid, 2);

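        /* Clear the PI error and CRB error interrupt masks. */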
        HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_PI_ERROR_MASK), 0);
        HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_PI_CRBP_ERROR_MASK), 0);

        /* Config and enable UART interrupt, all nodes. */
        local5_config.sh_local_int5_config_regval = 0;
        local5_config.sh_local_int5_config_s.idx = SGI_UART_VECTOR;
        local5_config.sh_local_int5_config_s.pid = cpu;
        HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_CONFIG),
                local5_config.sh_local_int5_config_regval);

        local5_enable.sh_local_int5_enable_regval = 0;
        local5_enable.sh_local_int5_enable_s.uart_int = 1;
        HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_ENABLE),
                local5_enable.sh_local_int5_enable_regval);

        /* The II_INT_CONFIG register for cpu 0. */
        ii_int_config.sh_ii_int0_config_regval = 0;
        ii_int_config.sh_ii_int0_config_s.type = 0;
        ii_int_config.sh_ii_int0_config_s.agt = 0;
        ii_int_config.sh_ii_int0_config_s.pid = cpu0;
        ii_int_config.sh_ii_int0_config_s.base = 0;

        HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT0_CONFIG),
                ii_int_config.sh_ii_int0_config_regval);

        /* The II_INT_CONFIG register for cpu 1. */
        ii_int_config.sh_ii_int0_config_regval = 0;
        ii_int_config.sh_ii_int0_config_s.type = 0;
        ii_int_config.sh_ii_int0_config_s.agt = 0;
        ii_int_config.sh_ii_int0_config_s.pid = cpu1;
        ii_int_config.sh_ii_int0_config_s.base = 0;

        HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT1_CONFIG),
                ii_int_config.sh_ii_int0_config_regval);

        /* Enable interrupts for II_INT0 and 1. */
        ii_int_enable.sh_ii_int0_enable_regval = 0;
        ii_int_enable.sh_ii_int0_enable_s.ii_enable = 1;

        HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT0_ENABLE),
                ii_int_enable.sh_ii_int0_enable_regval);
        HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT1_ENABLE),
                ii_int_enable.sh_ii_int0_enable_regval);
}

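/*
 * Reserve an interrupt level (vector).  If no specific vector is
 * requested (bit < 0), pick a free device vector; when none are free,
 * share one, preferring a vector already used by the same
 * vendor/device pair.  Returns the vector chosen, or -1 if the
 * requested vector is already reserved and cannot be shared.
 */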
static int intr_reserve_level(cpuid_t cpu, int bit)
{
        irqpda_t        *irqs = irqpdaindr;
        int             min_shared;
        int             i;

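        /* No specific vector requested: take the first unused device vector. */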
        if (bit < 0) {
                for (i = IA64_SN2_FIRST_DEVICE_VECTOR; i <= IA64_SN2_LAST_DEVICE_VECTOR; i++) {
                        if (irqs->irq_flags[i] == 0) {
                                bit = i;
                                break;
                        }
                }
        }

        if (bit < 0) {  /* Ran out of irqs.  Have to share.  This will be rare. */
                min_shared = 256;
                for (i = IA64_SN2_FIRST_DEVICE_VECTOR; i < IA64_SN2_LAST_DEVICE_VECTOR; i++) {
                        /* Share with the same device class. */
                        /* XXX: gross layering violation.. */
                        if (irqpdaindr->curr->vendor == irqpdaindr->device_dev[i]->vendor &&
                            irqpdaindr->curr->device == irqpdaindr->device_dev[i]->device &&
                            irqpdaindr->share_count[i] < min_shared) {
                                min_shared = irqpdaindr->share_count[i];
                                bit = i;
                        }
                }

                min_shared = 256;
                if (bit < 0) {  /* Didn't find a matching device, just pick one.
                                 * This will be exceptionally rare. */
                        for (i = IA64_SN2_FIRST_DEVICE_VECTOR; i < IA64_SN2_LAST_DEVICE_VECTOR; i++) {
                                if (irqpdaindr->share_count[i] < min_shared) {
                                        min_shared = irqpdaindr->share_count[i];
                                        bit = i;
                                }
                        }
                }
                irqpdaindr->share_count[bit]++;
        }

        if (!(irqs->irq_flags[bit] & SN2_IRQ_SHARED)) {
                if (irqs->irq_flags[bit] & SN2_IRQ_RESERVED)
                        return -1;
                irqs->num_irq_used++;
        }

        irqs->irq_flags[bit] |= SN2_IRQ_RESERVED;
        return bit;
}

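/* Release a previously reserved interrupt level (vector). */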
void intr_unreserve_level(cpuid_t cpu, int bit)
{
        irqpda_t        *irqs = irqpdaindr;

        if (irqs->irq_flags[bit] & SN2_IRQ_RESERVED) {
                irqs->num_irq_used--;
                irqs->irq_flags[bit] &= ~SN2_IRQ_RESERVED;
        }
}

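/*
 * Mark an interrupt level (vector) as connected.  Fails with -1 if the
 * vector is already connected and was not reserved as shared.
 */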
int intr_connect_level(cpuid_t cpu, int bit)
{
        irqpda_t        *irqs = irqpdaindr;

        if (!(irqs->irq_flags[bit] & SN2_IRQ_SHARED) &&
             (irqs->irq_flags[bit] & SN2_IRQ_CONNECTED))
                return -1;

        irqs->irq_flags[bit] |= SN2_IRQ_CONNECTED;
        return bit;
}

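/* Mark an interrupt level (vector) as disconnected; -1 if it was not connected. */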
int intr_disconnect_level(cpuid_t cpu, int bit)
{
        irqpda_t        *irqs = irqpdaindr;

        if (!(irqs->irq_flags[bit] & SN2_IRQ_CONNECTED))
                return -1;
        irqs->irq_flags[bit] &= ~SN2_IRQ_CONNECTED;
        return bit;
}

/*
 * Choose a cpu on this node.
 *
 * We choose the one with the fewest interrupts assigned to it.
 */
static cpuid_t intr_cpu_choose_from_node(cnodeid_t cnode)
{
        cpuid_t         cpu, best_cpu = CPU_NONE;
        int             slice, min_count = 1000;

        for (slice = CPUS_PER_NODE - 1; slice >= 0; slice--) {
                int intrs;

                cpu = cnode_slice_to_cpuid(cnode, slice);
                if (cpu == NR_CPUS)
                        continue;
                if (!cpu_online(cpu))
                        continue;

                intrs = pdacpu(cpu)->sn_num_irqs;

                if (min_count > intrs) {
                        min_count = intrs;
                        best_cpu = cpu;
                        if (enable_shub_wars_1_1()) {
                                /*
                                 * Rather than finding the best cpu, always
                                 * return the first cpu.  This forces all
                                 * interrupts to the same cpu.
                                 */
                                break;
                        }
                }
        }
        pdacpu(best_cpu)->sn_num_irqs++;
        return best_cpu;
}

/*
 * We couldn't put it on the closest node.  Try to find another one.
 * Do a stupid round-robin assignment across the nodes.
 */
static cpuid_t intr_cpu_choose_node(void)
{
        static cnodeid_t last_node = -1;        /* XXX: racy */
        cnodeid_t candidate_node;
        cpuid_t cpuid;

        if (last_node >= numnodes)
                last_node = 0;

        for (candidate_node = last_node + 1; candidate_node != last_node;
                        candidate_node++) {
                if (candidate_node == numnodes)
                        candidate_node = 0;
                cpuid = intr_cpu_choose_from_node(candidate_node);
                if (cpuid != CPU_NONE)
                        return cpuid;
        }

        return CPU_NONE;
}

/*
 * Find the node to assign for this interrupt.
 *
 * SN2 + pcibr addressing limitation:
 *   Due to this limitation, all interrupts from a given bridge must
 *   go to the same node.  The interrupt must also be targeted at
 *   the same processor.  This limitation does not exist on PIC.
 *   But, the processor limitation will stay.  The limitation will be
 *   similar to the bedrock/xbridge limit regarding PIs.
 */
cpuid_t intr_heuristic(vertex_hdl_t dev, int req_bit, int *resp_bit)
{
        cpuid_t         cpuid;
        vertex_hdl_t    pconn_vhdl;
        pcibr_soft_t    pcibr_soft;
        int             bit;

        /* XXX: gross layering violation.. */
        if (hwgraph_edge_get(dev, EDGE_LBL_PCI, &pconn_vhdl) == GRAPH_SUCCESS) {
                pcibr_soft = pcibr_soft_get(pconn_vhdl);
                if (pcibr_soft && pcibr_soft->bsi_err_intr) {
                        /*
                         * The cpu was chosen already when we assigned
                         * the error interrupt.
                         */
                        cpuid = ((hub_intr_t)pcibr_soft->bsi_err_intr)->i_cpuid;
                        goto done;
                }
        }

        /*
         * Need to choose one.  Try the controlling c-brick first.
         */
        cpuid = intr_cpu_choose_from_node(master_node_get(dev));
        if (cpuid == CPU_NONE)
                cpuid = intr_cpu_choose_node();

 done:
        if (cpuid != CPU_NONE) {
                bit = intr_reserve_level(cpuid, req_bit);
                if (bit >= 0) {
                        *resp_bit = bit;
                        return cpuid;
                }
        }

        printk("Cannot target interrupt to target cpu (%ld).\n", cpuid);
        return CPU_NONE;
}