2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
9 #include <linux/vmalloc.h>
10 #include <linux/slab.h>
11 #include <asm/sn/sgi.h>
12 #include <asm/sn/pci/pci_bus_cvlink.h>
13 #include <asm/sn/sn_cpuid.h>
14 #include <asm/sn/simulator.h>
16 extern int bridge_rev_b_data_check_disable;
18 vertex_hdl_t busnum_to_pcibr_vhdl[MAX_PCI_XWIDGET];
19 nasid_t busnum_to_nid[MAX_PCI_XWIDGET];
20 void * busnum_to_atedmamaps[MAX_PCI_XWIDGET];
21 unsigned char num_bridges;
22 static int done_probing;
23 extern irqpda_t *irqpdaindr;
25 static int pci_bus_map_create(struct pcibr_list_s *softlistp, moduleid_t io_moduleid);
26 vertex_hdl_t devfn_to_vertex(unsigned char busnum, unsigned int devfn);
28 extern void register_pcibr_intr(int irq, pcibr_intr_t intr);
30 static struct sn_flush_device_list *sn_dma_flush_init(unsigned long start,
32 int idx, int pin, int slot);
33 extern int cbrick_type_get_nasid(nasid_t);
34 extern void ioconfig_bus_new_entries(void);
35 extern void ioconfig_get_busnum(char *, int *);
36 extern int iomoduleid_get(nasid_t);
37 extern int pcibr_widget_to_bus(vertex_hdl_t);
38 extern int isIO9(int);
40 #define IS_OPUS(nasid) (cbrick_type_get_nasid(nasid) == MODULE_OPUSBRICK)
41 #define IS_ALTIX(nasid) (cbrick_type_get_nasid(nasid) == MODULE_CBRICK)
44 * Init the provider asic for a given device
/*
 * Looks up the pciio_info for the device's hwgraph vertex and caches the
 * provider ops vector in device_sysdata->pci_provider.
 * NOTE(review): this listing has source lines elided (the leading numbers
 * are original line numbers); braces/returns may be missing from view.
 */
47 static inline void __init
48 set_pci_provider(struct sn_device_sysdata *device_sysdata)
50 pciio_info_t pciio_info = pciio_info_get(device_sysdata->vhdl);
52 device_sysdata->pci_provider = pciio_info_pops_get(pciio_info);
56 * pci_bus_cvlink_init() - To be called once during initialization before
57 * SGI IO Infrastructure init is called.
/*
 * Zeroes the three busnum-indexed lookup tables (bus vertex, nasid,
 * pre-allocated ATE DMA maps), then initializes the ioconfig bus layer.
 * Returns whatever ioconfig_bus_init() returns -- presumably 0 on
 * success; verify against its definition.
 */
60 pci_bus_cvlink_init(void)
63 extern int ioconfig_bus_init(void);
65 memset(busnum_to_pcibr_vhdl, 0x0, sizeof(vertex_hdl_t) * MAX_PCI_XWIDGET);
66 memset(busnum_to_nid, 0x0, sizeof(nasid_t) * MAX_PCI_XWIDGET);
68 memset(busnum_to_atedmamaps, 0x0, sizeof(void *) * MAX_PCI_XWIDGET);
72 return ioconfig_bus_init();
76 * pci_bus_to_vertex() - Given a logical Linux Bus Number returns the associated
77 * pci bus vertex from the SGI IO Infrastructure.
/*
 * Simple table lookup into busnum_to_pcibr_vhdl[]; entries are populated
 * by pci_bus_map_create().  The return statement is elided from this view
 * -- presumably returns pci_bus (NULL/0 when the bus number is unknown).
 */
79 static inline vertex_hdl_t
80 pci_bus_to_vertex(unsigned char busnum)
83 vertex_hdl_t pci_bus = NULL;
87 * First get the xwidget vertex.
89 pci_bus = busnum_to_pcibr_vhdl[busnum];
94 * devfn_to_vertex() - returns the vertex of the device given the bus, slot,
95 * and function numbers.
/*
 * Resolves (busnum, devfn) to a hwgraph device vertex by traversing the
 * bus vertex with a name of the form "<slot>" (single-function card) or
 * "<slot><a..h>" (multi-function card).  Returns the vertex handle, or a
 * NULL vertex when neither traversal succeeds.  Several lines (the NULL
 * bus-vertex check and some returns) are elided from this listing.
 */
98 devfn_to_vertex(unsigned char busnum, unsigned int devfn)
104 vertex_hdl_t pci_bus = NULL;
105 vertex_hdl_t device_vertex = (vertex_hdl_t)NULL;
108 * Go get the pci bus vertex.
110 pci_bus = pci_bus_to_vertex(busnum);
113 * During probing, the Linux pci code invents non-existent
114 * bus numbers and pci_dev structures and tries to access
115 * them to determine existence. Don't crib during probing.
118 printk("devfn_to_vertex: Invalid bus number %d given.\n", busnum);
124 * Go get the slot&function vertex.
125 * Should call pciio_slot_func_to_name() when ready.
127 slot = PCI_SLOT(devfn);
128 func = PCI_FUNC(devfn);
131 * For a NON Multi-function card the name of the device looks like:
132 * ../pci/1, ../pci/2 ..
/* Try the single-function name first: just the slot number. */
135 sprintf(name, "%d", slot);
136 if (hwgraph_traverse(pci_bus, name, &device_vertex) ==
139 return(device_vertex);
145 * This maybe a multifunction card. It's names look like:
146 * ../pci/1a, ../pci/1b, etc.
/* Fall back to the multi-function name: slot + letter per function. */
148 sprintf(name, "%d%c", slot, 'a'+func);
149 if (hwgraph_traverse(pci_bus, name, &device_vertex) != GRAPH_SUCCESS) {
150 if (!device_vertex) {
155 return(device_vertex);
159 * sn_alloc_pci_sysdata() - This routine allocates a pci controller
160 * which is expected as the pci_dev and pci_bus sysdata by the Linux
161 * PCI infrastructure.
/*
 * Allocates and zeroes a struct pci_controller.  Caller owns the result
 * and is responsible for freeing it.  The NULL-check between the kmalloc
 * and the memset is elided from this listing -- TODO confirm it exists.
 * NOTE(review): kmalloc+memset could be a single kzalloc() call.
 */
163 static struct pci_controller *
164 sn_alloc_pci_sysdata(void)
166 struct pci_controller *pci_sysdata;
168 pci_sysdata = kmalloc(sizeof(*pci_sysdata), GFP_KERNEL);
172 memset(pci_sysdata, 0, sizeof(*pci_sysdata));
177 * sn_pci_fixup_bus() - This routine sets up a bus's resources
178 * consistent with the Linux PCI abstraction layer.
/*
 * Attaches platform sysdata to a struct pci_bus: a pci_controller whose
 * platform_data is a sn_widget_sysdata holding the bus's hwgraph vertex.
 * Error returns on allocation failure are elided from this listing.
 * NOTE(review): on the widget_sysdata failure path, pci_sysdata is
 * presumably freed in an elided line -- verify, else it leaks.
 */
181 sn_pci_fixup_bus(struct pci_bus *bus)
183 struct pci_controller *pci_sysdata;
184 struct sn_widget_sysdata *widget_sysdata;
186 pci_sysdata = sn_alloc_pci_sysdata();
188 printk(KERN_WARNING "sn_pci_fixup_bus(): Unable to "
189 "allocate memory for pci_sysdata\n");
192 widget_sysdata = kmalloc(sizeof(struct sn_widget_sysdata),
194 if (!widget_sysdata) {
195 printk(KERN_WARNING "sn_pci_fixup_bus(): Unable to "
196 "allocate memory for widget_sysdata\n");
201 widget_sysdata->vhdl = pci_bus_to_vertex(bus->number);
202 pci_sysdata->platform_data = (void *)widget_sysdata;
203 bus->sysdata = pci_sysdata;
209 * sn_pci_fixup_slot() - This routine sets up a slot's resources
210 * consistent with the Linux PCI abstraction layer. Resources acquired
211 * from our PCI provider include PIO maps to BAR space and interrupt
/*
 * Per-device fixup: allocates sysdata, resolves the hwgraph vertex,
 * PIO-maps every I/O BAR, then every MEM BAR, maps display-card ROMs,
 * enables the command-register bits that correspond to mapped spaces,
 * allocates/connects the provider interrupt, and builds the DMA write-
 * buffer flush list.  Many braces and error returns are elided from
 * this numbered listing.
 */
215 sn_pci_fixup_slot(struct pci_dev *dev)
217 extern int bit_pos_to_irq(int);
223 struct pci_controller *pci_sysdata;
224 struct sn_device_sysdata *device_sysdata;
225 pciio_intr_line_t lines = 0;
226 vertex_hdl_t device_vertex;
227 pciio_provider_t *pci_provider;
228 pciio_intr_t intr_handle;
230 /* Allocate a controller structure */
231 pci_sysdata = sn_alloc_pci_sysdata();
233 printk(KERN_WARNING "sn_pci_fixup_slot: Unable to "
234 "allocate memory for pci_sysdata\n");
238 /* Set the device vertex */
239 device_sysdata = kmalloc(sizeof(struct sn_device_sysdata), GFP_KERNEL);
240 if (!device_sysdata) {
241 printk(KERN_WARNING "sn_pci_fixup_slot: Unable to "
242 "allocate memory for device_sysdata\n");
/* NOTE(review): pci_sysdata is presumably freed here in an elided
 * line -- verify, else it leaks on this path. */
247 device_sysdata->vhdl = devfn_to_vertex(dev->bus->number, dev->devfn);
248 pci_sysdata->platform_data = (void *) device_sysdata;
249 dev->sysdata = pci_sysdata;
250 set_pci_provider(device_sysdata);
252 pci_read_config_word(dev, PCI_COMMAND, &cmd);
255 * Set the resources address correctly. The assumption here
256 * is that the addresses in the resource structure has been
257 * read from the card and it was set in the card by our
258 * Infrastructure. NOTE: PIC and TIOCP don't have big-window
259 * upport for PCI I/O space. So by mapping the I/O space
260 * first we will attempt to use Device(x) registers for I/O
261 * BARs (which can't use big windows like MEM BARs can).
263 vhdl = device_sysdata->vhdl;
265 /* Allocate the IORESOURCE_IO space first */
266 for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
267 unsigned long start, end, addr;
269 device_sysdata->pio_map[idx] = NULL;
/* Skip non-I/O BARs on this first pass. */
271 if (!(dev->resource[idx].flags & IORESOURCE_IO))
274 start = dev->resource[idx].start;
275 end = dev->resource[idx].end;
/* PIO-map the BAR window; on failure zero the resource so Linux
 * treats the BAR as unusable. */
280 addr = (unsigned long)pciio_pio_addr(vhdl, 0,
281 PCIIO_SPACE_WIN(idx), 0, size,
282 &device_sysdata->pio_map[idx], 0);
285 dev->resource[idx].start = 0;
286 dev->resource[idx].end = 0;
287 printk("sn_pci_fixup(): pio map failure for "
288 "%s bar%d\n", dev->slot_name, idx);
/* Rewrite the resource to the CPU-visible uncached alias of the
 * mapped address. */
290 addr |= __IA64_UNCACHED_OFFSET;
291 dev->resource[idx].start = addr;
292 dev->resource[idx].end = addr + size;
295 if (dev->resource[idx].flags & IORESOURCE_IO)
296 cmd |= PCI_COMMAND_IO;
299 /* Allocate the IORESOURCE_MEM space next */
300 for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
301 unsigned long start, end, addr;
/* Second pass: anything that is not an I/O BAR. */
303 if ((dev->resource[idx].flags & IORESOURCE_IO))
306 start = dev->resource[idx].start;
307 end = dev->resource[idx].end;
312 addr = (unsigned long)pciio_pio_addr(vhdl, 0,
313 PCIIO_SPACE_WIN(idx), 0, size,
314 &device_sysdata->pio_map[idx], 0);
317 dev->resource[idx].start = 0;
318 dev->resource[idx].end = 0;
319 printk("sn_pci_fixup(): pio map failure for "
320 "%s bar%d\n", dev->slot_name, idx);
322 addr |= __IA64_UNCACHED_OFFSET;
323 dev->resource[idx].start = addr;
324 dev->resource[idx].end = addr + size;
327 if (dev->resource[idx].flags & IORESOURCE_MEM)
328 cmd |= PCI_COMMAND_MEMORY;
332 * Assign addresses to the ROMs, but don't enable them yet
333 * Also note that we only map display card ROMs due to PIO mapping
/* Only display-class devices get their expansion ROM mapped. */
336 if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
338 size = dev->resource[PCI_ROM_RESOURCE].end -
339 dev->resource[PCI_ROM_RESOURCE].start;
342 addr = (unsigned long) pciio_pio_addr(vhdl, 0,
344 0, size, 0, PIOMAP_FIXED);
346 dev->resource[PCI_ROM_RESOURCE].start = 0;
347 dev->resource[PCI_ROM_RESOURCE].end = 0;
348 printk("sn_pci_fixup(): ROM pio map failure "
349 "for %s\n", dev->slot_name);
351 addr |= __IA64_UNCACHED_OFFSET;
352 dev->resource[PCI_ROM_RESOURCE].start = addr;
353 dev->resource[PCI_ROM_RESOURCE].end = addr + size;
354 if (dev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_MEM)
355 cmd |= PCI_COMMAND_MEMORY;
360 * Update the Command Word on the Card.
362 cmd |= PCI_COMMAND_MASTER; /* If the device doesn't support */
363 /* bit gets dropped .. no harm */
364 pci_write_config_word(dev, PCI_COMMAND, cmd);
366 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, (unsigned char *)&lines);
367 device_vertex = device_sysdata->vhdl;
368 pci_provider = device_sysdata->pci_provider;
369 device_sysdata->intr_handle = NULL;
/* The provider's intr_alloc consults irqpdaindr->curr for the device
 * being set up. */
374 irqpdaindr->curr = dev;
376 intr_handle = (pci_provider->intr_alloc)(device_vertex, NULL, lines, device_vertex);
377 if (intr_handle == NULL) {
378 printk(KERN_WARNING "sn_pci_fixup: pcibr_intr_alloc() failed\n");
/* NOTE(review): device_sysdata is freed here, but dev->sysdata ->
 * platform_data (set at line 248) still points at it -- dangling
 * pointer on this failure path; confirm against elided lines. */
380 kfree(device_sysdata);
384 device_sysdata->intr_handle = intr_handle;
385 irq = intr_handle->pi_irq;
386 irqpdaindr->device_dev[irq] = dev;
387 (pci_provider->intr_connect)(intr_handle, (intr_func_t)0, (intr_arg_t)0);
390 register_pcibr_intr(irq, (pcibr_intr_t)intr_handle);
/* For every BAR with a nonzero size, and every interrupt bit this
 * device uses, register a DMA write-buffer flush entry. */
392 for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
393 int ibits = ((pcibr_intr_t)intr_handle)->bi_ibits;
396 size = dev->resource[idx].end -
397 dev->resource[idx].start;
398 if (size == 0) continue;
400 for (i=0; i<8; i++) {
401 if (ibits & (1 << i) ) {
402 extern pcibr_info_t pcibr_info_get(vertex_hdl_t);
403 device_sysdata->dma_flush_list =
404 sn_dma_flush_init(dev->resource[idx].start,
405 dev->resource[idx].end,
408 PCIBR_INFO_SLOT_GET_EXT(pcibr_info_get(device_sysdata->vhdl)));
415 #ifdef CONFIG_HOTPLUG_PCI_SGI
/*
 * Invalidates a DMA-flush entry for a hot-removed device: marks the
 * entry unused (pin/bus/slot = -1) and zeroes any bar_list ranges that
 * match [start, end].  Counterpart of sn_dma_flush_init().
 */
418 sn_dma_flush_clear(struct sn_flush_device_list *dma_flush_list,
419 unsigned long start, unsigned long end)
424 dma_flush_list->pin = -1;
425 dma_flush_list->bus = -1;
426 dma_flush_list->slot = -1;
428 for (i = 0; i < PCI_ROM_RESOURCE; i++)
429 if ((dma_flush_list->bar_list[i].start == start) &&
430 (dma_flush_list->bar_list[i].end == end)) {
431 dma_flush_list->bar_list[i].start = 0;
432 dma_flush_list->bar_list[i].end = 0;
439 * sn_pci_unfixup_slot() - This routine frees a slot's resources
440 * consistent with the Linux PCI abstraction layer. Resources released
441 * back to our PCI provider include PIO maps to BAR space and interrupt
/*
 * Hotplug-removal inverse of sn_pci_fixup_slot(): clears the DMA flush
 * entries for every nonzero-sized BAR, disconnects and frees the
 * interrupt, and tears down each PIO map.
 * NOTE(review): freeing of device_sysdata / pci_sysdata themselves is
 * not visible in this listing -- confirm it happens elsewhere.
 */
445 sn_pci_unfixup_slot(struct pci_dev *dev)
447 struct sn_device_sysdata *device_sysdata;
449 pciio_intr_t intr_handle;
454 device_sysdata = SN_DEVICE_SYSDATA(dev);
456 vhdl = device_sysdata->vhdl;
458 if (device_sysdata->dma_flush_list)
459 for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
460 size = dev->resource[idx].end -
461 dev->resource[idx].start;
462 if (size == 0) continue;
464 sn_dma_flush_clear(device_sysdata->dma_flush_list,
465 dev->resource[idx].start,
466 dev->resource[idx].end);
469 intr_handle = device_sysdata->intr_handle;
471 extern void unregister_pcibr_intr(int, pcibr_intr_t);
472 irq = intr_handle->pi_irq;
473 irqpdaindr->device_dev[irq] = NULL;
474 unregister_pcibr_intr(irq, (pcibr_intr_t) intr_handle);
475 pciio_intr_disconnect(intr_handle);
476 pciio_intr_free(intr_handle);
/* Release every PIO map acquired during fixup. */
479 for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
480 if (device_sysdata->pio_map[idx]) {
481 pciio_piomap_done (device_sysdata->pio_map[idx]);
482 pciio_piomap_free (device_sysdata->pio_map[idx]);
487 #endif /* CONFIG_HOTPLUG_PCI_SGI */
/* Per-nasid table of write-buffer flush state, lazily populated below. */
489 struct sn_flush_nasid_entry flush_nasid_list[MAX_NASIDS];
491 /* Initialize the data structures for flushing write buffers after a PIO read.
493 * Take an unused int. pin and associate it with a pin that is in use.
494 * After a PIO read, force an interrupt on the unused pin, forcing a write buffer flush
495 * on the in use pin. This will prevent the race condition between PIO read responses and
/*
 * Registers the BAR range [start, end] in the flush entry for
 * (nasid, widget, bus, pin, slot), allocating the per-nasid and
 * per-widget tables on first use, then programs the bridge's "force
 * interrupt" register for the chosen spare pin.  Returns a pointer to
 * the sn_flush_device_list entry (return lines elided from this view).
 * Many braces, early returns and the big-window translation details are
 * elided; treat the visible flow as a sketch, not the full function.
 */
498 static struct sn_flush_device_list *
499 sn_dma_flush_init(unsigned long start, unsigned long end, int idx, int pin, int slot)
502 unsigned long dnasid;
505 struct sn_flush_device_list *p;
/* Decode nasid / widget / bus / big-window number from the PIO addr. */
510 nasid = NASID_GET(start);
511 wid_num = SWIN_WIDGETNUM(start);
512 bus = (start >> 23) & 0x1;
513 bwin = BWIN_WINDOWNUM(start);
/* Lazily allocate the per-widget pointer table for this nasid. */
515 if (flush_nasid_list[nasid].widget_p == NULL) {
516 flush_nasid_list[nasid].widget_p = (struct sn_flush_device_list **)kmalloc((HUB_WIDGET_ID_MAX+1) *
517 sizeof(struct sn_flush_device_list *), GFP_KERNEL);
518 if (!flush_nasid_list[nasid].widget_p) {
519 printk(KERN_WARNING "sn_dma_flush_init: Cannot allocate memory for nasid list\n");
522 memset(flush_nasid_list[nasid].widget_p, 0, (HUB_WIDGET_ID_MAX+1) * sizeof(struct sn_flush_device_list *));
/* Big-window address: recover widget/bus from the ITTE register. */
525 int itte_index = bwin - 1;
528 itte = HUB_L(IIO_ITTE_GET(nasid, itte_index));
529 flush_nasid_list[nasid].iio_itte[bwin] = itte;
530 wid_num = (itte >> IIO_ITTE_WIDGET_SHIFT)
531 & IIO_ITTE_WIDGET_MASK;
532 bus = itte & IIO_ITTE_OFFSET_MASK;
533 if (bus == 0x4 || bus == 0x8) {
540 /* if it's IO9, bus 1, we don't care about slots 1 and 4. This is
541 * because these are the IOC4 slots and we don't flush them.
543 if (isIO9(nasid) && bus == 0 && (slot == 1 || slot == 4)) {
/* Lazily allocate the per-device entry array for this widget. */
546 if (flush_nasid_list[nasid].widget_p[wid_num] == NULL) {
547 flush_nasid_list[nasid].widget_p[wid_num] = (struct sn_flush_device_list *)kmalloc(
548 DEV_PER_WIDGET * sizeof (struct sn_flush_device_list), GFP_KERNEL);
549 if (!flush_nasid_list[nasid].widget_p[wid_num]) {
550 printk(KERN_WARNING "sn_dma_flush_init: Cannot allocate memory for nasid sub-list\n");
553 memset(flush_nasid_list[nasid].widget_p[wid_num], 0,
554 DEV_PER_WIDGET * sizeof (struct sn_flush_device_list));
555 p = &flush_nasid_list[nasid].widget_p[wid_num][0];
556 for (i=0; i<DEV_PER_WIDGET;i++) {
/* Find the existing entry for (pin, bus, slot), or presumably the
 * first free one -- the loop's fallback is elided from this view. */
564 p = &flush_nasid_list[nasid].widget_p[wid_num][0];
565 for (i=0;i<DEV_PER_WIDGET; i++) {
566 if (p->pin == pin && p->bus == bus && p->slot == slot) break;
/* Record the BAR range in the first free bar_list slot. */
576 for (i=0; i<PCI_ROM_RESOURCE; i++) {
577 if (p->bar_list[i].start == 0) {
578 p->bar_list[i].start = start;
579 p->bar_list[i].end = end;
/* Base of the bridge's register space for this widget/bus. */
583 b = (void *)(NODE_SWIN_BASE(nasid, wid_num) | (bus << 23) );
585 /* If it's IO9, then slot 2 maps to slot 7 and slot 6 maps to slot 8.
586 * To see this is non-trivial. By drawing pictures and reading manuals and talking
587 * to HW guys, we can see that on IO9 bus 1, slots 7 and 8 are always unused.
588 * Further, since we short-circuit slots 1, 3, and 4 above, we only have to worry
589 * about the case when there is a card in slot 2. A multifunction card will appear
590 * to be in slot 6 (from an interrupt point of view) also. That's the most we'll
591 * have to worry about. A four function card will overload the interrupt lines in
593 * We also need to special case the 12160 device in slot 3. Fortunately, we have
594 * a spare intr. line for pin 4, so we'll use that for the 12160.
595 * All other buses have slot 3 and 4 and slots 7 and 8 unused. Since we can only
596 * see slots 1 and 2 and slots 5 and 6 coming through here for those buses (this
597 * is true only on Pxbricks with 2 physical slots per bus), we just need to add
598 * 2 to the slot number to find an unused slot.
599 * We have convinced ourselves that we will never see a case where two different cards
600 * in two different slots will ever share an interrupt line, so there is no need to
604 if (isIO9(nasid) && ( (IS_ALTIX(nasid) && wid_num == 0xc)
605 || (IS_OPUS(nasid) && wid_num == 0xf) )
/* IO9 special case: route the force-interrupt through spare pin 6. */
608 p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, 6);
609 pcireg_bridge_intr_device_bit_set(b, (1<<18));
610 dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
611 pcireg_bridge_intr_addr_set(b, 6, ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
612 (dnasid << 36) | (0xfUL << 48)));
613 } else if (pin == 2) { /* 12160 SCSI device in IO9 */
614 p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, 4);
615 pcireg_bridge_intr_device_bit_set(b, (2<<12));
616 dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
617 pcireg_bridge_intr_addr_set(b, 4,
618 ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
619 (dnasid << 36) | (0xfUL << 48)));
620 } else { /* slot == 6 */
621 p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, 7);
622 pcireg_bridge_intr_device_bit_set(b, (5<<21));
623 dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
624 pcireg_bridge_intr_addr_set(b, 7,
625 ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
626 (dnasid << 36) | (0xfUL << 48)));
/* General (non-IO9) case: spare interrupt line is pin + 2. */
629 p->force_int_addr = (unsigned long)pcireg_bridge_force_always_addr_get(b, (pin +2));
630 pcireg_bridge_intr_device_bit_set(b, (pin << (pin * 3)));
631 dnasid = NASID_GET(virt_to_phys(&p->flush_addr));
632 pcireg_bridge_intr_addr_set(b, (pin + 2),
633 ((virt_to_phys(&p->flush_addr) & 0xfffffffff) |
634 (dnasid << 36) | (0xfUL << 48)));
641 * linux_bus_cvlink() Creates a link between the Linux PCI Bus number
642 * to the actual hardware component that it represents:
643 * /dev/hw/linux/busnum/0 -> ../../../hw/module/001c01/slab/0/Ibrick/xtalk/15/pci
645 * The bus vertex, when called to devfs_generate_path() returns:
646 * hw/module/001c01/slab/0/Ibrick/xtalk/15/pci
647 * hw/module/001c01/slab/1/Pbrick/xtalk/12/pci-x/0
648 * hw/module/001c01/slab/1/Pbrick/xtalk/12/pci-x/1
/*
 * For every populated entry in busnum_to_pcibr_vhdl[], adds a hwgraph
 * edge named by the hex bus number under the "linux_busnum" vertex
 * (whose creation is elided from this listing).
 */
651 linux_bus_cvlink(void)
656 for (index=0; index < MAX_PCI_XWIDGET; index++) {
657 if (!busnum_to_pcibr_vhdl[index])
/* Edge name is the bus number in hex. */
660 sprintf(name, "%x", index);
661 (void) hwgraph_edge_add(linux_busnum, busnum_to_pcibr_vhdl[index],
667 * pci_bus_map_create() - Called by pci_bus_to_hcl_cvlink() to finish the job.
669 * Linux PCI Bus numbers are assigned from lowest module_id numbers
/*
 * Computes the persistent Linux bus number for one bridge (base number
 * from the module id plus the widget's bus offset), records the bus
 * vertex in busnum_to_pcibr_vhdl[], and pre-allocates the 32-bit page-
 * map ATE DMA map array for that bus.
 */
673 pci_bus_map_create(struct pcibr_list_s *softlistp, moduleid_t moduleid)
676 int basebus_num, bus_number;
677 vertex_hdl_t pci_bus = softlistp->bl_vhdl;
678 char moduleid_str[16];
680 memset(moduleid_str, 0, 16);
681 format_module_id(moduleid_str, moduleid, MODULE_FORMAT_BRIEF);
682 (void) ioconfig_get_busnum((char *)moduleid_str, &basebus_num);
685 * Assign the correct bus number and also the nasid of this
688 bus_number = basebus_num + pcibr_widget_to_bus(pci_bus);
691 char hwpath[MAXDEVNAME] = "\0";
692 extern int hwgraph_vertex_name_get(vertex_hdl_t, char *, uint);
694 pcibr_soft_t pcibr_soft = softlistp->bl_soft;
695 hwgraph_vertex_name_get(pci_bus, hwpath, MAXDEVNAME);
696 printk("%s:\n\tbus_num %d, basebus_num %d, brick_bus %d, "
697 "bus_vhdl 0x%lx, brick_type %d\n", hwpath, bus_number,
698 basebus_num, pcibr_widget_to_bus(pci_bus),
699 (uint64_t)pci_bus, pcibr_soft->bs_bricktype);
702 busnum_to_pcibr_vhdl[bus_number] = pci_bus;
705 * Pre assign DMA maps needed for 32 Bits Page Map DMA.
707 busnum_to_atedmamaps[bus_number] = (void *) vmalloc(
708 sizeof(struct pcibr_dmamap_s)*MAX_ATE_MAPS);
/* NOTE(review): comparing a pointer with "<= 0" is not a valid
 * allocation-failure test; vmalloc() returns NULL on failure, so this
 * should be "== NULL".  Left as-is since surrounding lines are elided. */
709 if (busnum_to_atedmamaps[bus_number] <= 0) {
710 printk("pci_bus_map_create: Cannot allocate memory for ate maps\n");
713 memset(busnum_to_atedmamaps[bus_number], 0x0,
714 sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS);
719 * pci_bus_to_hcl_cvlink() - This routine is called after SGI IO Infrastructure
720 * initialization has completed to set up the mappings between PCI BRIDGE
721 * ASIC and logical pci bus numbers.
723 * Must be called before pci_init() is invoked.
/*
 * For each module: collects the pcibr_list entries belonging to that
 * module into a local list (BASEIO/IO9 bricks prepended so they get the
 * lower persistent bus numbers), then calls pci_bus_map_create() on each
 * entry, and finally publishes the Linux bus-number links.
 */
726 pci_bus_to_hcl_cvlink(void)
729 extern pcibr_list_p pcibr_list;
731 for (i = 0; i < nummodules; i++) {
732 struct pcibr_list_s *softlistp = pcibr_list;
733 struct pcibr_list_s *first_in_list = NULL;
734 struct pcibr_list_s *last_in_list = NULL;
736 /* Walk the list of pcibr_soft structs looking for matches */
738 struct pcibr_soft_s *pcibr_soft = softlistp->bl_soft;
741 /* Is this PCI bus associated with this moduleid? */
742 moduleid = NODE_MODULEID(
743 nasid_to_cnodeid(pcibr_soft->bs_nasid));
744 if (modules[i]->id == moduleid) {
745 struct pcibr_list_s *new_element;
/* NOTE(review): allocation is sized by struct pcibr_soft_s but the
 * object is used as a struct pcibr_list_s -- only safe if the former
 * is at least as large; should be sizeof(struct pcibr_list_s) (or
 * sizeof *new_element).  Not changed here because surrounding lines
 * are elided from this listing. */
747 new_element = kmalloc(sizeof (struct pcibr_soft_s), GFP_KERNEL);
748 if (new_element == NULL) {
749 printk("%s: Couldn't allocate memory\n",__FUNCTION__);
752 new_element->bl_soft = softlistp->bl_soft;
753 new_element->bl_vhdl = softlistp->bl_vhdl;
754 new_element->bl_next = NULL;
756 /* list empty so just put it on the list */
757 if (first_in_list == NULL) {
758 first_in_list = new_element;
759 last_in_list = new_element;
760 softlistp = softlistp->bl_next;
765 * BASEIO IObricks attached to a module have
766 * a higher priority than non BASEIO IOBricks
767 * when it comes to persistant pci bus
768 * numbering, so put them on the front of the
771 if (isIO9(pcibr_soft->bs_nasid)) {
772 new_element->bl_next = first_in_list;
773 first_in_list = new_element;
775 last_in_list->bl_next = new_element;
776 last_in_list = new_element;
779 softlistp = softlistp->bl_next;
783 * We now have a list of all the pci bridges associated with
784 * the module_id, modules[i]. Call pci_bus_map_create() for
787 softlistp = first_in_list;
790 struct pcibr_list_s *next = softlistp->bl_next;
791 iobrick = iomoduleid_get(softlistp->bl_soft->bs_nasid);
792 pci_bus_map_create(softlistp, iobrick);
/* NOTE(review): the per-module list elements are presumably kfree'd
 * in elided lines of this walk ("next" is saved above) -- verify. */
799 * Create the Linux PCI bus number vertex link.
801 (void)linux_bus_cvlink();
802 (void)ioconfig_bus_new_entries();
808 * Ugly hack to get PCI setup until we have a proper ACPI namespace.
811 #define PCI_BUSES_TO_SCAN 256
813 extern struct pci_ops sn_pci_ops;
/*
 * Platform PCI init (registered via subsys_initcall below).  The
 * function header line itself is elided from this listing -- presumably
 * "static int __init sn_pci_init(void)"; TODO confirm.  Bails out unless
 * running on real sn2 hardware, initializes the SGI IO infrastructure,
 * scans every candidate bus that has a hwgraph vertex, then applies the
 * per-bus and per-device fixups and sets the root resource ranges.
 */
818 struct pci_controller *controller;
819 struct list_head *ln;
820 struct pci_bus *pci_bus = NULL;
821 struct pci_dev *pci_dev = NULL;
823 #ifdef CONFIG_PROC_FS
824 extern void register_sn_procfs(void);
826 extern void sgi_master_io_infr_init(void);
827 extern void sn_init_cpei_timer(void);
/* Not sn2, or running under the simulator: nothing to do. */
830 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_SIMULATOR())
834 * This is needed to avoid bounce limit checks in the blk layer
836 ia64_max_iommu_merge_mask = ~PAGE_MASK;
839 * set pci_raw_ops, etc.
841 sgi_master_io_infr_init();
843 sn_init_cpei_timer();
845 #ifdef CONFIG_PROC_FS
846 register_sn_procfs();
849 controller = kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
851 printk(KERN_WARNING "cannot allocate PCI controller\n");
855 memset(controller, 0, sizeof(struct pci_controller));
/* Scan only bus numbers that map to a real bridge vertex.  The single
 * controller struct is shared by every scanned bus. */
857 for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
858 if (pci_bus_to_vertex(i))
859 pci_scan_bus(i, &sn_pci_ops, controller);
864 * Initialize the pci bus vertex in the pci_bus struct.
866 for( ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
867 pci_bus = pci_bus_b(ln);
868 ret = sn_pci_fixup_bus(pci_bus);
871 "sn_pci_fixup: sn_pci_fixup_bus fails : error %d\n",
878 * set the root start and end so that drivers calling check_region()
879 * won't see a conflict
881 ioport_resource.start = 0xc000000000000000;
882 ioport_resource.end = 0xcfffffffffffffff;
885 * Set the root start and end for Mem Resource.
887 iomem_resource.start = 0;
888 iomem_resource.end = 0xffffffffffffffff;
891 * Initialize the device vertex in the pci_dev struct.
893 while ((pci_dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) {
894 ret = sn_pci_fixup_slot(pci_dev);
897 "sn_pci_fixup: sn_pci_fixup_slot fails : error %d\n",
906 subsys_initcall(sn_pci_init);