2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
9 #include <linux/module.h>
10 #include <linux/string.h>
11 #include <linux/interrupt.h>
12 #include <asm/sn/sgi.h>
13 #include <asm/sn/sn_sal.h>
14 #include <asm/sn/iograph.h>
15 #include <asm/sn/pci/pciio.h>
16 #include <asm/sn/pci/pcibr.h>
17 #include <asm/sn/pci/pcibr_private.h>
18 #include <asm/sn/pci/pci_defs.h>
20 #include <asm/sn/prio.h>
21 #include <asm/sn/sn_private.h>
24 * global variables to toggle the different levels of pcibr debugging.
25 * -pcibr_debug_mask is the mask of the different types of debugging
26 * you want to enable. See sys/PCI/pcibr_private.h
27 * -pcibr_debug_module is the module you want to trace. By default
28 * all modules are traced. The format is something like "001c10".
29 * -pcibr_debug_widget is the widget you want to trace. For TIO
30 * based bricks use the corelet id.
31 * -pcibr_debug_slot is the pci slot you want to trace.
/* Runtime debug controls for the pcibr driver; mask bits are defined in
 * pcibr_private.h (see the block comment above).
 */
33 uint32_t pcibr_debug_mask; /* 0x00000000 to disable */
34 static char *pcibr_debug_module = "all"; /* 'all' for all modules */
35 static int pcibr_debug_widget = -1; /* '-1' for all widgets */
36 static int pcibr_debug_slot = -1; /* '-1' for all slots */
/* NOTE(review): presumably the head of a global list of pcibr soft-state
 * structures -- the list type and its uses are not visible here; confirm
 * against pcibr_private.h.
 */
40 pcibr_list_p pcibr_list;
/* PCI address-space names, defined elsewhere; used by the debug output below. */
43 extern char *pci_space[];
45 /* =====================================================================
46 * Function Table of Contents
48 * The order of functions in this file has stopped
49 * making much sense. We might want to take a look
50 * at it some time and bring back some sanity, or
51 * perhaps bust this file into smaller chunks.
/* ---- RRB (read response buffer) management ---- */
54 extern void do_pcibr_rrb_free_all(pcibr_soft_t, pciio_slot_t);
55 extern void do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int, int);
56 extern void pcibr_rrb_alloc_more(pcibr_soft_t pcibr_soft, int slot,
57 int vchan, int more_rrbs);
59 extern int pcibr_wrb_flush(vertex_hdl_t);
60 extern int pcibr_rrb_alloc(vertex_hdl_t, int *, int *);
/* NOTE(review): duplicate prototype -- pcibr_rrb_alloc_more is already
 * declared (with parameter names and 'extern') a few lines above. Harmless
 * but redundant; a candidate for removal.
 */
61 void pcibr_rrb_alloc_more(pcibr_soft_t, int, int, int);
63 extern void pcibr_rrb_flush(vertex_hdl_t);
/* ---- Device(x) register reservation/release (defined in this file) ---- */
65 static int pcibr_try_set_device(pcibr_soft_t, pciio_slot_t, unsigned, uint64_t);
66 void pcibr_release_device(pcibr_soft_t, pciio_slot_t, uint64_t);
68 extern iopaddr_t pcibr_bus_addr_alloc(pcibr_soft_t, pciio_win_info_t,
69 pciio_space_t, int, int, int);
/* NOTE(review): the continuation line of this prototype (remaining
 * parameters and ';') was lost in this extraction.
 */
70 extern int hwgraph_vertex_name_get(vertex_hdl_t vhdl, char *buf,
73 int pcibr_detach(vertex_hdl_t);
74 void pcibr_directmap_init(pcibr_soft_t);
75 int pcibr_pcix_rbars_calc(pcibr_soft_t);
/* ---- ATE (address translation entry) helpers ---- */
76 extern int pcibr_ate_alloc(pcibr_soft_t, int, struct resource *);
77 extern void pcibr_ate_free(pcibr_soft_t, int, int, struct resource *);
78 extern pciio_dmamap_t get_free_pciio_dmamap(vertex_hdl_t);
79 extern void free_pciio_dmamap(pcibr_dmamap_t);
80 extern int pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl);
82 extern void ate_write(pcibr_soft_t, int, int, bridge_ate_t);
84 pcibr_info_t pcibr_info_get(vertex_hdl_t);
/* ---- PIO mapping provider entry points ---- */
86 static iopaddr_t pcibr_addr_pci_to_xio(vertex_hdl_t, pciio_slot_t, pciio_space_t, iopaddr_t, size_t, unsigned);
88 pcibr_piomap_t pcibr_piomap_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
89 void pcibr_piomap_free(pcibr_piomap_t);
90 caddr_t pcibr_piomap_addr(pcibr_piomap_t, iopaddr_t, size_t);
91 void pcibr_piomap_done(pcibr_piomap_t);
92 caddr_t pcibr_piotrans_addr(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
93 iopaddr_t pcibr_piospace_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, size_t, size_t);
94 void pcibr_piospace_free(vertex_hdl_t, pciio_space_t, iopaddr_t, size_t);
/* ---- DMA mapping provider entry points ---- */
96 static iopaddr_t pcibr_flags_to_d64(unsigned, pcibr_soft_t);
97 extern bridge_ate_t pcibr_flags_to_ate(pcibr_soft_t, unsigned);
99 pcibr_dmamap_t pcibr_dmamap_alloc(vertex_hdl_t, device_desc_t, size_t, unsigned);
100 void pcibr_dmamap_free(pcibr_dmamap_t);
101 extern bridge_ate_p pcibr_ate_addr(pcibr_soft_t, int);
102 static iopaddr_t pcibr_addr_xio_to_pci(pcibr_soft_t, iopaddr_t, size_t);
103 iopaddr_t pcibr_dmamap_addr(pcibr_dmamap_t, paddr_t, size_t);
104 void pcibr_dmamap_done(pcibr_dmamap_t);
105 cnodeid_t pcibr_get_dmatrans_node(vertex_hdl_t);
106 iopaddr_t pcibr_dmatrans_addr(vertex_hdl_t, device_desc_t, paddr_t, size_t, unsigned);
107 void pcibr_dmamap_drain(pcibr_dmamap_t);
108 void pcibr_dmaaddr_drain(vertex_hdl_t, paddr_t, size_t);
109 iopaddr_t pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
/* ---- Provider lifecycle and miscellaneous entry points ---- */
111 void pcibr_provider_startup(vertex_hdl_t);
112 void pcibr_provider_shutdown(vertex_hdl_t);
114 int pcibr_reset(vertex_hdl_t);
115 pciio_endian_t pcibr_endian_set(vertex_hdl_t, pciio_endian_t, pciio_endian_t);
116 int pcibr_device_flags_set(vertex_hdl_t, pcibr_device_flags_t);
118 extern int pcibr_slot_info_free(vertex_hdl_t,pciio_slot_t);
/* NOTE(review): continuation line of this prototype was lost in this
 * extraction.
 */
119 extern int pcibr_slot_detach(vertex_hdl_t, pciio_slot_t, int,
122 pciio_businfo_t pcibr_businfo_get(vertex_hdl_t);
124 /* =====================================================================
125 * Device(x) register management
128 /* pcibr_try_set_device: attempt to modify Device(x)
129 * for the specified slot on the specified bridge
130 * as requested in flags, limited to the specified
131 * bits. Returns which BRIDGE bits were in conflict,
132 * or ZERO if everything went OK.
134 * Caller MUST hold pcibr_lock when calling this function.
/* pcibr_try_set_device: compute and apply a new Device(x) register value for
 * 'slot' under pcibr_lock.  Translates generic (PCIIO_*) and provider-specific
 * (PCIBR_*) flags into Bridge Device(x) bits, detects conflicts with streams
 * already using the slot (via the per-mode use counters bss_*_uctr), and
 * either resolves them by forcing bits on/off or refuses the change.
 *
 * NOTE(review): many interior lines of this function (parameter list, local
 * declarations, several closing braces and return statements) were lost in
 * this extraction; the fragments below are kept verbatim.
 */
137 pcibr_try_set_device(pcibr_soft_t pcibr_soft,
142 pcibr_soft_slot_t slotp;
153 slotp = &pcibr_soft->bs_slot[slot];
155 s = pcibr_lock(pcibr_soft);
157 old = slotp->bss_device;
159 /* figure out what the desired
160 * Device(x) bits are based on
161 * the flags specified.
166 /* Currently, we inherit anything that
167 * the new caller has not specified in
168 * one way or another, unless we take
169 * action here to not inherit.
171 * This is needed for the "swap" stuff,
172 * since it could have been set via
173 * pcibr_endian_set -- altho note that
174 * any explicit PCIBR_BYTE_STREAM or
175 * PCIBR_WORD_VALUES will freely override
176 * the effect of that call (and vice
177 * versa, no protection either way).
179 * I want to get rid of pcibr_endian_set
180 * in favor of tracking DMA endianness
181 * using the flags specified when DMA
182 * channels are created.
185 #define BRIDGE_DEV_WRGA_BITS (BRIDGE_DEV_PMU_WRGA_EN | BRIDGE_DEV_DIR_WRGA_EN)
186 #define BRIDGE_DEV_SWAP_BITS (BRIDGE_DEV_SWAP_PMU | BRIDGE_DEV_SWAP_DIR)
188 /* Do not use Barrier, Write Gather,
189 * or Prefetch unless asked.
190 * Leave everything else as it
191 * was from the last time.
194 & ~BRIDGE_DEV_BARRIER
195 & ~BRIDGE_DEV_WRGA_BITS
199 /* Generic macro flags
201 if (flags & PCIIO_DMA_DATA) {
203 & ~BRIDGE_DEV_BARRIER) /* barrier off */
204 | BRIDGE_DEV_PREF; /* prefetch on */
207 if (flags & PCIIO_DMA_CMD) {
209 & ~BRIDGE_DEV_PREF) /* prefetch off */
210 & ~BRIDGE_DEV_WRGA_BITS) /* write gather off */
211 | BRIDGE_DEV_BARRIER; /* barrier on */
213 /* Generic detail flags
215 if (flags & PCIIO_WRITE_GATHER)
216 new |= BRIDGE_DEV_WRGA_BITS;
217 if (flags & PCIIO_NOWRITE_GATHER)
218 new &= ~BRIDGE_DEV_WRGA_BITS;
220 if (flags & PCIIO_PREFETCH)
221 new |= BRIDGE_DEV_PREF;
222 if (flags & PCIIO_NOPREFETCH)
223 new &= ~BRIDGE_DEV_PREF;
225 if (flags & PCIBR_WRITE_GATHER)
226 new |= BRIDGE_DEV_WRGA_BITS;
227 if (flags & PCIBR_NOWRITE_GATHER)
228 new &= ~BRIDGE_DEV_WRGA_BITS;
230 if (flags & PCIIO_BYTE_STREAM)
231 new |= BRIDGE_DEV_SWAP_DIR;
232 if (flags & PCIIO_WORD_VALUES)
233 new &= ~BRIDGE_DEV_SWAP_DIR;
235 /* Provider-specific flags
237 if (flags & PCIBR_PREFETCH)
238 new |= BRIDGE_DEV_PREF;
239 if (flags & PCIBR_NOPREFETCH)
240 new &= ~BRIDGE_DEV_PREF;
242 if (flags & PCIBR_PRECISE)
243 new |= BRIDGE_DEV_PRECISE;
244 if (flags & PCIBR_NOPRECISE)
245 new &= ~BRIDGE_DEV_PRECISE;
247 if (flags & PCIBR_BARRIER)
248 new |= BRIDGE_DEV_BARRIER;
249 if (flags & PCIBR_NOBARRIER)
250 new &= ~BRIDGE_DEV_BARRIER;
252 if (flags & PCIBR_64BIT)
253 new |= BRIDGE_DEV_DEV_SIZE;
254 if (flags & PCIBR_NO64BIT)
255 new &= ~BRIDGE_DEV_DEV_SIZE;
258 * PIC BRINGUP WAR (PV# 855271):
259 * Allow setting BRIDGE_DEV_VIRTUAL_EN on PIC iff we're a 64-bit
260 * device. The bit is only intended for 64-bit devices and, on
261 * PIC, can cause problems for 32-bit devices.
263 if (mask == BRIDGE_DEV_D64_BITS &&
264 PCIBR_WAR_ENABLED(PV855271, pcibr_soft)) {
265 if (flags & PCIBR_VCHAN1) {
266 new |= BRIDGE_DEV_VIRTUAL_EN;
267 mask |= BRIDGE_DEV_VIRTUAL_EN;
271 /* PIC BRINGUP WAR (PV# 878674): Don't allow 64bit PIO accesses */
272 if ((flags & PCIBR_64BIT) &&
273 PCIBR_WAR_ENABLED(PV878674, pcibr_soft)) {
/* NOTE(review): magic constant -- bit 22 is cleared here; presumably the
 * 64-bit PIO enable in Device(x), but no symbolic name is visible. Confirm
 * against pcibr register definitions.
 */
274 new &= ~(1ull << 22);
277 chg = old ^ new; /* what are we changing, */
278 chg &= mask; /* of the interesting bits */
/* A changed bit is "bad" (conflicting) only if some existing stream of that
 * mode (D32, PMU, D64) is currently counted as using this slot.
 */
282 badd32 = slotp->bss_d32_uctr ? (BRIDGE_DEV_D32_BITS & chg) : 0;
283 badpmu = slotp->bss_pmu_uctr ? (XBRIDGE_DEV_PMU_BITS & chg) : 0;
284 badd64 = slotp->bss_d64_uctr ? (XBRIDGE_DEV_D64_BITS & chg) : 0;
285 bad = badpmu | badd32 | badd64;
289 /* some conflicts can be resolved by
290 * forcing the bit on. this may cause
291 * some performance degradation in
292 * the stream(s) that want the bit off,
293 * but the alternative is not allowing
294 * the new stream at all.
296 if ( (fix = bad & (BRIDGE_DEV_PRECISE |
297 BRIDGE_DEV_BARRIER)) ) {
299 /* don't change these bits if
300 * they are already set in "old"
304 /* some conflicts can be resolved by
305 * forcing the bit off. this may cause
306 * some performance degradation in
307 * the stream(s) that want the bit on,
308 * but the alternative is not allowing
309 * the new stream at all.
311 if ( (fix = bad & (BRIDGE_DEV_WRGA_BITS |
312 BRIDGE_DEV_PREF)) ) {
314 /* don't change these bits if
315 * we wanted to turn them on.
319 /* conflicts in other bits mean
320 * we can not establish this DMA
321 * channel while the other(s) are
325 pcibr_unlock(pcibr_soft, s);
326 PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
327 "pcibr_try_set_device: mod blocked by 0x%x\n", bad));
/* Success path: bump the use counter for the requesting mode. */
332 if (mask == BRIDGE_DEV_PMU_BITS)
333 slotp->bss_pmu_uctr++;
334 if (mask == BRIDGE_DEV_D32_BITS)
335 slotp->bss_d32_uctr++;
336 if (mask == BRIDGE_DEV_D64_BITS)
337 slotp->bss_d64_uctr++;
339 /* the value we want to write is the
340 * original value, with the bits for
341 * our selected changes flipped, and
342 * with any disabled features turned off.
344 new = old ^ chg; /* only change what we want to change */
346 if (slotp->bss_device == new) {
347 pcibr_unlock(pcibr_soft, s);
351 pcireg_device_set(pcibr_soft, slot, new);
352 slotp->bss_device = new;
353 pcireg_tflush_get(pcibr_soft); /* wait until Bridge PIO complete */
354 pcibr_unlock(pcibr_soft, s);
356 PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
357 "pcibr_try_set_device: Device(%d): 0x%x\n", slot, new));
/* pcibr_release_device: undo a successful pcibr_try_set_device reservation.
 * Decrements the per-slot use counter matching 'mask' (PMU / D32 / D64) under
 * pcibr_lock.  The Device(x) register itself is not rewritten here.
 *
 * NOTE(review): parameter list and closing brace were lost in this
 * extraction; fragments kept verbatim.
 */
362 pcibr_release_device(pcibr_soft_t pcibr_soft,
366 pcibr_soft_slot_t slotp;
369 slotp = &pcibr_soft->bs_slot[slot];
371 s = pcibr_lock(pcibr_soft);
373 if (mask == BRIDGE_DEV_PMU_BITS)
374 slotp->bss_pmu_uctr--;
375 if (mask == BRIDGE_DEV_D32_BITS)
376 slotp->bss_d32_uctr--;
377 if (mask == BRIDGE_DEV_D64_BITS)
378 slotp->bss_d64_uctr--;
380 pcibr_unlock(pcibr_soft, s);
384 /* =====================================================================
385 * Bridge (pcibr) "Device Driver" entry points
/* pcibr_mmap: mmap file operation for the pcibr device node.  Maps the
 * bridge's register space (bs_base with the ia64 uncached-region bits masked
 * off) into the caller's address space as non-cached I/O memory.
 *
 * NOTE(review): return-type line, some local declarations, the tail of the
 * io_remap_page_range() call and the return statement were lost in this
 * extraction; fragments kept verbatim.
 */
390 pcibr_mmap(struct file * file, struct vm_area_struct * vma)
392 vertex_hdl_t pcibr_vhdl = file->f_dentry->d_fsdata;
393 pcibr_soft_t pcibr_soft;
395 unsigned long phys_addr;
398 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
399 bridge = pcibr_soft->bs_base;
400 phys_addr = (unsigned long)bridge & ~0xc000000000000000; /* Mask out the Uncache bits */
401 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
402 vma->vm_flags |= VM_RESERVED | VM_IO;
403 error = io_remap_page_range(vma, phys_addr, vma->vm_start,
404 vma->vm_end - vma->vm_start,
410 * This is the file operation table for the pcibr driver.
411 * As each of the functions are implemented, put the
412 * appropriate function name below.
/* NOTE(review): this prototype declares pcibr_mmap 'static' but appears
 * AFTER the definition above; whether the definition's (elided) return-type
 * line also says 'static' cannot be confirmed from this extraction.
 */
414 static int pcibr_mmap(struct file * file, struct vm_area_struct * vma);
/* File-operations table for the pcibr driver; the remaining initializers
 * and closing brace were lost in this extraction.
 */
415 struct file_operations pcibr_fops = {
416 .owner = THIS_MODULE,
421 /* This is special case code used by grio. There are plans to make
422 * this a bit more general in the future, but till then this should
/* pcibr_device_slot_get: walk back up the hwgraph canonical path from
 * 'dev_vhdl' until a vertex with pciio info is found, and return that
 * device's internal PCI slot number (PCIIO_SLOT_NONE if none found).
 * Each vertex reference taken by hwgraph_connectpt_get() is released.
 *
 * NOTE(review): return-type line, loop-exit branch and return statement were
 * lost in this extraction; fragments kept verbatim.
 */
426 pcibr_device_slot_get(vertex_hdl_t dev_vhdl)
428 char devname[MAXDEVNAME];
430 pciio_info_t pciio_info;
431 pciio_slot_t slot = PCIIO_SLOT_NONE;
433 vertex_to_name(dev_vhdl, devname, MAXDEVNAME);
435 /* run back along the canonical path
436 * until we find a PCI connection point.
438 tdev = hwgraph_connectpt_get(dev_vhdl);
439 while (tdev != GRAPH_VERTEX_NONE) {
440 pciio_info = pciio_info_chk(tdev);
442 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
445 hwgraph_vertex_unref(tdev);
446 tdev = hwgraph_connectpt_get(tdev);
448 hwgraph_vertex_unref(tdev);
/* pcibr_info_get: fetch the pcibr_info attached to a connection vertex;
 * thin cast wrapper around pciio_info_get().
 */
454 pcibr_info_get(vertex_hdl_t vhdl)
456 return (pcibr_info_t) pciio_info_get(vhdl);
/* pcibr_device_info_new: allocate and initialize a pcibr_info for a device
 * function discovered on this bridge.  Registers the embedded pciio info
 * (converting the internal slot number to its external representation),
 * records the PCI bus number, computes the default INTA-D interrupt-bit
 * mapping, and files the struct into the slot's sparse per-function table.
 *
 * NOTE(review): the 'slot' parameter line, the kmalloc failure check and the
 * return statement were lost in this extraction; fragments kept verbatim.
 */
460 pcibr_device_info_new(
461 pcibr_soft_t pcibr_soft,
463 pciio_function_t rfunc,
464 pciio_vendor_id_t vendor,
465 pciio_device_id_t device)
467 pcibr_info_t pcibr_info;
468 pciio_function_t func;
471 func = (rfunc == PCIIO_FUNC_NONE) ? 0 : rfunc;
474 * Create a pciio_info_s for this device. pciio_device_info_new()
475 * will set the c_slot (which is supposed to represent the external
476 * slot (i.e the slot number silk screened on the back of the I/O
477 * brick)). So for PIC we need to adjust this "internal slot" num
478 * passed into us, into its external representation. See comment
479 * for the PCIBR_DEVICE_TO_SLOT macro for more information.
481 pcibr_info = kmalloc(sizeof (*(pcibr_info)), GFP_KERNEL);
485 memset(pcibr_info, 0, sizeof (*(pcibr_info)));
487 pciio_device_info_new(&pcibr_info->f_c, pcibr_soft->bs_vhdl,
488 PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),
489 rfunc, vendor, device);
490 pcibr_info->f_dev = slot;
492 /* Set PCI bus number */
493 pcibr_info->f_bus = pcibr_widget_to_bus(pcibr_soft->bs_vhdl);
495 if (slot != PCIIO_SLOT_NONE) {
498 * Currently favored mapping from PCI
499 * slot number and INTA/B/C/D to Bridge
500 * PCI Interrupt Bit Number:
512 * XXX- allow pcibr_hints to override default
513 * XXX- allow ADMIN to override pcibr_hints
515 for (ibit = 0; ibit < 4; ++ibit)
516 pcibr_info->f_ibit[ibit] =
517 (slot + 4 * ibit) & 7;
520 * Record the info in the sparse func info space.
522 if (func < pcibr_soft->bs_slot[slot].bss_ninfo)
523 pcibr_soft->bs_slot[slot].bss_infos[func] = pcibr_info;
530 * pcibr_device_unregister
531 * This frees up any hardware resources reserved for this PCI device
532 * and removes any PCI infrastructural information setup for it.
533 * This is usually used at the time of shutting down of the PCI card.
/* pcibr_device_unregister: release all bridge resources held for the PCI
 * device at 'pconn_vhdl' (see the block comment above).  Shuts down the
 * device's xtalk side, flushes its RRBs, restores the slot's boot-time RRB
 * allocation if it was changed, flushes write buffers, and frees the
 * per-slot bookkeeping.
 *
 * NOTE(review): return-type line, several local declarations, error-return
 * branches after the flush/free calls, and closing brace were lost in this
 * extraction; fragments kept verbatim.
 */
536 pcibr_device_unregister(vertex_hdl_t pconn_vhdl)
538 pciio_info_t pciio_info;
539 vertex_hdl_t pcibr_vhdl;
541 pcibr_soft_t pcibr_soft;
542 int count_vchan0, count_vchan1;
547 pciio_info = pciio_info_get(pconn_vhdl);
549 pcibr_vhdl = pciio_info_master_get(pciio_info);
550 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
552 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
554 /* Clear all the hardware xtalk resources for this device */
555 xtalk_widgetdev_shutdown(pcibr_soft->bs_conn, slot);
557 /* Flush all the rrbs */
558 pcibr_rrb_flush(pconn_vhdl);
561 * If the RRB configuration for this slot has changed, set it
562 * back to the boot-time default
564 if (pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] >= 0) {
566 s = pcibr_lock(pcibr_soft);
/* Return every RRB currently assigned to this slot (all four virtual
 * channels) to the reserved pool before re-allocating the defaults.
 */
568 pcibr_soft->bs_rrb_res[slot] = pcibr_soft->bs_rrb_res[slot] +
569 pcibr_soft->bs_rrb_valid[slot][VCHAN0] +
570 pcibr_soft->bs_rrb_valid[slot][VCHAN1] +
571 pcibr_soft->bs_rrb_valid[slot][VCHAN2] +
572 pcibr_soft->bs_rrb_valid[slot][VCHAN3];
574 /* Free the rrbs allocated to this slot, both the normal & virtual */
575 do_pcibr_rrb_free_all(pcibr_soft, slot);
577 count_vchan0 = pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0];
578 count_vchan1 = pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN1];
580 pcibr_unlock(pcibr_soft, s);
582 pcibr_rrb_alloc(pconn_vhdl, &count_vchan0, &count_vchan1);
586 /* Flush the write buffers !! */
587 error_call = pcibr_wrb_flush(pconn_vhdl);
592 /* Clear the information specific to the slot */
593 error_call = pcibr_slot_info_free(pcibr_vhdl, slot);
603 * pcibr_driver_reg_callback
604 * CDL will call this function for each device found in the PCI
605 * registry that matches the vendor/device IDs supported by
606 * the driver being registered. The device's connection vertex
607 * and the driver's attach function return status enable the
608 * slot's device status to be set.
/* pcibr_driver_reg_callback: CDL driver-registration callback (see block
 * comment above).  Records the attach status in the device's pcibr_info and,
 * under hotplug, updates the slot's startup-complete/incomplete status bits.
 *
 * NOTE(review): return-type line, early-return statement, the if/else
 * around the INCMPLT/CMPLT assignments, and closing brace were lost in this
 * extraction; fragments kept verbatim.
 */
611 pcibr_driver_reg_callback(vertex_hdl_t pconn_vhdl,
612 int key1, int key2, int error)
614 pciio_info_t pciio_info;
615 pcibr_info_t pcibr_info;
616 vertex_hdl_t pcibr_vhdl;
618 pcibr_soft_t pcibr_soft;
620 /* Do not set slot status for vendor/device ID wildcard drivers */
621 if ((key1 == -1) || (key2 == -1))
624 pciio_info = pciio_info_get(pconn_vhdl);
625 pcibr_info = pcibr_info_get(pconn_vhdl);
627 pcibr_vhdl = pciio_info_master_get(pciio_info);
628 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
630 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
631 pcibr_info->f_att_det_error = error;
633 #ifdef CONFIG_HOTPLUG_PCI_SGI
634 pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
637 pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_INCMPLT;
639 pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_CMPLT;
641 #endif /* CONFIG_HOTPLUG_PCI_SGI */
645 * pcibr_driver_unreg_callback
646 * CDL will call this function for each device found in the PCI
647 * registry that matches the vendor/device IDs supported by
648 * the driver being unregistered. The device's connection vertex
649 * and the driver's detach function return status enable the
650 * slot's device status to be set.
/* pcibr_driver_unreg_callback: CDL driver-unregistration callback; mirror of
 * pcibr_driver_reg_callback.  Records the detach status and, under hotplug,
 * updates the slot's shutdown-complete/incomplete status bits.
 *
 * NOTE(review): return-type line, early-return statement, the if/else
 * around the INCMPLT/CMPLT assignments, and closing brace were lost in this
 * extraction; fragments kept verbatim.
 */
653 pcibr_driver_unreg_callback(vertex_hdl_t pconn_vhdl,
654 int key1, int key2, int error)
656 pciio_info_t pciio_info;
657 pcibr_info_t pcibr_info;
658 vertex_hdl_t pcibr_vhdl;
660 pcibr_soft_t pcibr_soft;
662 /* Do not set slot status for vendor/device ID wildcard drivers */
663 if ((key1 == -1) || (key2 == -1))
666 pciio_info = pciio_info_get(pconn_vhdl);
667 pcibr_info = pcibr_info_get(pconn_vhdl);
669 pcibr_vhdl = pciio_info_master_get(pciio_info);
670 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
672 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
673 pcibr_info->f_att_det_error = error;
674 #ifdef CONFIG_HOTPLUG_PCI_SGI
675 pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
678 pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_INCMPLT;
680 pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_CMPLT;
682 #endif /* CONFIG_HOTPLUG_PCI_SGI */
687 * Detach the bridge device from the hwgraph after cleaning out all the
688 * underlying vertices.
/* pcibr_detach: tear down the bridge reached through xtalk connection point
 * 'xconn' -- disable its interrupts, detach every PCI slot, unregister the
 * no-slot connection, release the error interrupt, and strip the hwgraph
 * labels (see block comment above).
 *
 * NOTE(review): local declarations, the failure return after
 * hwgraph_traverse, the soft-state teardown following line 730, and the
 * final return were lost in this extraction; fragments kept verbatim.
 */
692 pcibr_detach(vertex_hdl_t xconn)
695 vertex_hdl_t pcibr_vhdl;
696 pcibr_soft_t pcibr_soft;
699 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DETACH, xconn, "pcibr_detach\n"));
701 /* Get the bridge vertex from its xtalk connection point */
702 if (hwgraph_traverse(xconn, EDGE_LBL_PCI, &pcibr_vhdl) != GRAPH_SUCCESS)
705 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
707 /* Disable the interrupts from the bridge */
708 s = pcibr_lock(pcibr_soft);
709 pcireg_intr_enable_set(pcibr_soft, 0);
710 pcibr_unlock(pcibr_soft, s);
712 /* Detach all the PCI devices talking to this bridge */
713 for (slot = pcibr_soft->bs_min_slot;
714 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
715 pcibr_slot_detach(pcibr_vhdl, slot, 0, (char *)NULL, (int *)NULL);
718 /* Unregister the no-slot connection point */
719 pciio_device_info_unregister(pcibr_vhdl,
720 &(pcibr_soft->bs_noslot_info->f_c));
722 kfree(pcibr_soft->bs_name);
724 /* Disconnect the error interrupt and free the xtalk resources
725 * associated with it.
727 xtalk_intr_disconnect(pcibr_soft->bsi_err_intr);
728 xtalk_intr_free(pcibr_soft->bsi_err_intr);
730 /* Clear the software state maintained by the bridge driver for this
735 /* Remove the Bridge revision labelled info */
736 (void)hwgraph_info_remove_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, NULL);
743 * Set the Bridge's 32-bit PCI to XTalk Direct Map register to the most useful
744 * value we can determine. Note that we must use a single xid for all of:
745 * -direct-mapped 32-bit DMA accesses
746 * -direct-mapped 64-bit DMA accesses
747 * -DMA accesses through the PMU
749 * This is the only way to guarantee that completion interrupts will reach a
750 * CPU after all DMA data has reached memory.
/* pcibr_directmap_init: program the bridge's 32-bit PCI-to-XTalk direct map
 * register (see block comment above).  Resolves node 0's base physical
 * address to an XIO address, derives the direct-map offset from it, writes
 * offset and widget id, and handles the special 512MB-offset encoding.
 *
 * NOTE(review): local declarations (nasid, paddr, xbase, diroff), the else
 * keyword before line 777, and closing braces were lost in this extraction;
 * fragments kept verbatim.
 */
753 pcibr_directmap_init(pcibr_soft_t pcibr_soft)
758 cnodeid_t cnodeid = 0; /* We need api for diroff api */
761 nasid = cnodeid_to_nasid(cnodeid);
762 paddr = NODE_OFFSET(nasid) + 0;
764 /* Assume that if we ask for a DMA mapping to zero the XIO host will
765 * transmute this into a request for the lowest hunk of memory.
767 xbase = xtalk_dmatrans_addr(pcibr_soft->bs_conn, 0, paddr, PAGE_SIZE, 0);
769 diroff = xbase >> BRIDGE_DIRMAP_OFF_ADDRSHFT;
770 pcireg_dirmap_diroff_set(pcibr_soft, diroff);
771 pcireg_dirmap_wid_set(pcibr_soft, pcibr_soft->bs_mxid);
772 pcibr_soft->bs_dir_xport = pcibr_soft->bs_mxid;
773 if (xbase == (512 << 20)) { /* 512Meg */
774 pcireg_dirmap_add512_set(pcibr_soft);
775 pcibr_soft->bs_dir_xbase = (512 << 20);
777 pcireg_dirmap_add512_clr(pcibr_soft);
778 pcibr_soft->bs_dir_xbase = diroff << BRIDGE_DIRMAP_OFF_ADDRSHFT;
/* pcibr_asic_rev: look up the Bridge ASIC revision label attached to the
 * master (bridge) vertex of 'pconn_vhdl' in the hwgraph.
 *
 * NOTE(review): return-type line, the if wrapper around the traverse call,
 * the failure return(s) and the final return of the revision value were
 * lost in this extraction; fragments kept verbatim.
 */
784 pcibr_asic_rev(vertex_hdl_t pconn_vhdl)
786 vertex_hdl_t pcibr_vhdl;
788 arbitrary_info_t ainfo;
791 hwgraph_traverse(pconn_vhdl, EDGE_LBL_MASTER, &pcibr_vhdl))
794 rc = hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &ainfo);
797 * Any hwgraph function that returns a vertex handle will implicitly
798 * increment that vertex's reference count. The caller must explicitly
799 * decrement the vertex's reference count after the last reference to
802 * Decrement reference count incremented by call to hwgraph_traverse().
805 hwgraph_vertex_unref(pcibr_vhdl);
807 if (rc != GRAPH_SUCCESS)
813 /* =====================================================================
/* pcibr_addr_pci_to_xio: translate a PCI address (space + offset + size) for
 * a slot into an XIO address the host can PIO through, returning XIO_NOWHERE
 * on failure.  Handles config space directly, reduces BAR-window spaces to
 * raw MEM/IO, tries to satisfy the request through a DevIO(x) window
 * (programming a free one if needed), falls back to the big direct-mapped
 * MEM32/MEM64 apertures on PIC, and finally reconciles the bridge-wide
 * direct-PIO byte-swap setting with the caller's flags.
 *
 * NOTE(review): this is an elided extraction -- the remaining parameter
 * lines, several local declarations, various closing braces, 'continue'/'
 * goto done' statements, the switch statement head, and the final return
 * were lost.  Fragments kept verbatim.
 */
818 pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
825 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
826 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
827 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
828 unsigned bar; /* which BASE reg on device is decoding */
829 iopaddr_t xio_addr = XIO_NOWHERE;
833 pciio_space_t wspace; /* which space device is decoding */
834 iopaddr_t wbase; /* base of device decode on PCI */
835 size_t wsize; /* size of device decode on PCI */
837 int try; /* DevIO(x) window scanning order control */
839 int win; /* which DevIO(x) window is being used */
840 pciio_space_t mspace; /* target space for devio(x) register */
841 iopaddr_t mbase; /* base of devio(x) mapped area on PCI */
842 size_t msize; /* size of devio(x) mapped area on PCI */
843 size_t mmask; /* addr bits stored in Device(x) */
847 s = pcibr_lock(pcibr_soft);
/* Dual-slot devices (e.g. ioc3): redirect to the host slot's info. */
849 if (pcibr_soft->bs_slot[slot].has_host) {
850 slot = pcibr_soft->bs_slot[slot].host_slot;
851 pcibr_info = pcibr_soft->bs_slot[slot].bss_infos[0];
854 * Special case for dual-slot pci devices such as ioc3 on IP27
855 * baseio. In these cases, pconn_vhdl should never be for a pci
856 * function on a subordinate PCI bus, so we can safely reset pciio_info
857 * to be the info struct embedded in pcibr_info. Failure to do this
858 * results in using a bogus pciio_info_t for calculations done later
862 pciio_info = &pcibr_info->f_c;
864 if (space == PCIIO_SPACE_NONE)
867 if (space == PCIIO_SPACE_CFG) {
869 * Usually, the first mapping
870 * established to a PCI device
871 * is to its config space.
873 * In any case, we definitely
874 * do NOT need to worry about
875 * PCI BASE registers, and
876 * MUST NOT attempt to point
877 * the DevIO(x) window at
880 if (((flags & PCIIO_BYTE_STREAM) == 0) &&
881 ((pci_addr + req_size) <= BRIDGE_TYPE0_CFG_FUNC_OFF))
882 xio_addr = pci_addr + PCIBR_TYPE0_CFG_DEV(pcibr_soft, slot);
886 if (space == PCIIO_SPACE_ROM) {
887 /* PIO to the Expansion Rom.
888 * Driver is responsible for
889 * enabling and disabling
892 wbase = pciio_info->c_rbase;
893 wsize = pciio_info->c_rsize;
896 * While the driver should know better
897 * than to attempt to map more space
898 * than the device is decoding, he might
899 * do it; better to bail out here.
901 if ((pci_addr + req_size) > wsize)
905 space = PCIIO_SPACE_MEM;
908 * reduce window mappings to raw
909 * space mappings (maybe allocating
910 * windows), and try for DevIO(x)
911 * usage (setting it if it is available).
913 bar = space - PCIIO_SPACE_WIN0;
915 wspace = pciio_info->c_window[bar].w_space;
916 if (wspace == PCIIO_SPACE_NONE)
919 /* get PCI base and size */
920 wbase = pciio_info->c_window[bar].w_base;
921 wsize = pciio_info->c_window[bar].w_size;
924 * While the driver should know better
925 * than to attempt to map more space
926 * than the device is decoding, he might
927 * do it; better to bail out here.
929 if ((pci_addr + req_size) > wsize)
932 /* shift from window relative to
933 * decoded space relative.
940 /* Scan all the DevIO(x) windows twice looking for one
941 * that can satisfy our request. The first time through,
942 * only look at assigned windows; the second time, also
943 * look at PCIIO_SPACE_NONE windows. Arrange the order
944 * so we always look at our own window first.
946 * We will not attempt to satisfy a single request
947 * by concatenating multiple windows.
949 maxtry = PCIBR_NUM_SLOTS(pcibr_soft) * 2;
950 halftry = PCIBR_NUM_SLOTS(pcibr_soft) - 1;
951 for (try = 0; try < maxtry; ++try) {
955 /* calculate win based on slot, attempt, and max possible
957 win = (try + slot) % PCIBR_NUM_SLOTS(pcibr_soft);
959 /* If this DevIO(x) mapping area can provide
960 * a mapping to this address, use it.
962 msize = (win < 2) ? 0x200000 : 0x100000;
964 if (space != PCIIO_SPACE_IO)
967 offset = pci_addr & (msize - 1);
969 /* If this window can't possibly handle that request,
970 * go on to the next window.
972 if (((pci_addr & (msize - 1)) + req_size) > msize)
975 devreg = pcibr_soft->bs_slot[win].bss_device;
977 /* Is this window "nailed down"?
978 * If not, maybe we can use it.
979 * (only check this the second time through)
981 mspace = pcibr_soft->bs_slot[win].bss_devio.bssd_space;
982 if ((try > halftry) && (mspace == PCIIO_SPACE_NONE)) {
984 /* If this is the primary DevIO(x) window
985 * for some other device, skip it.
988 (PCIIO_VENDOR_ID_NONE !=
989 pcibr_soft->bs_slot[win].bss_vendor_id))
992 /* It's a free window, and we fit in it.
993 * Set up Device(win) to our taste.
995 mbase = pci_addr & mmask;
997 /* check that we would really get from
1000 if ((mbase | offset) != pci_addr)
1003 devreg &= ~BRIDGE_DEV_OFF_MASK;
1004 if (space != PCIIO_SPACE_IO)
1005 devreg |= BRIDGE_DEV_DEV_IO_MEM;
1007 devreg &= ~BRIDGE_DEV_DEV_IO_MEM;
1008 devreg |= (mbase >> 20) & BRIDGE_DEV_OFF_MASK;
1010 /* default is WORD_VALUES.
1011 * if you specify both,
1012 * operation is undefined.
1014 if (flags & PCIIO_BYTE_STREAM)
1015 devreg |= BRIDGE_DEV_DEV_SWAP;
1017 devreg &= ~BRIDGE_DEV_DEV_SWAP;
1019 if (pcibr_soft->bs_slot[win].bss_device != devreg) {
1020 pcireg_device_set(pcibr_soft, win, devreg);
1021 pcibr_soft->bs_slot[win].bss_device = devreg;
1022 pcireg_tflush_get(pcibr_soft);
1024 PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pconn_vhdl,
1025 "pcibr_addr_pci_to_xio: Device(%d): 0x%x\n",
1028 pcibr_soft->bs_slot[win].bss_devio.bssd_space = space;
1029 pcibr_soft->bs_slot[win].bss_devio.bssd_base = mbase;
1030 xio_addr = PCIBR_BRIDGE_DEVIO(pcibr_soft, win) + (pci_addr - mbase);
1032 /* Increment this DevIO's use count */
1033 pcibr_soft->bs_slot[win].bss_devio.bssd_ref_cnt++;
1035 /* Save the DevIO register index used to access this BAR */
1037 pcibr_info->f_window[bar].w_devio_index = win;
1039 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
1040 "pcibr_addr_pci_to_xio: map to space %s [0x%lx..0x%lx] "
1041 "for slot %d allocates DevIO(%d) Device(%d) set to %lx\n",
1042 pci_space[space], pci_addr, pci_addr + req_size - 1,
1043 slot, win, win, devreg));
1046 } /* endif DevIO(x) not pointed */
1047 mbase = pcibr_soft->bs_slot[win].bss_devio.bssd_base;
1049 /* Now check for request incompat with DevIO(x)
1051 if ((mspace != space) ||
1052 (pci_addr < mbase) ||
1053 ((pci_addr + req_size) > (mbase + msize)) ||
1054 ((flags & PCIIO_BYTE_STREAM) && !(devreg & BRIDGE_DEV_DEV_SWAP)) ||
1055 (!(flags & PCIIO_BYTE_STREAM) && (devreg & BRIDGE_DEV_DEV_SWAP)))
1058 /* DevIO(x) window is pointed at PCI space
1059 * that includes our target. Calculate the
1060 * final XIO address, release the lock and
1063 xio_addr = PCIBR_BRIDGE_DEVIO(pcibr_soft, win) + (pci_addr - mbase);
1065 /* Increment this DevIO's use count */
1066 pcibr_soft->bs_slot[win].bss_devio.bssd_ref_cnt++;
1068 /* Save the DevIO register index used to access this BAR */
1070 pcibr_info->f_window[bar].w_devio_index = win;
1072 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
1073 "pcibr_addr_pci_to_xio: map to space %s [0x%lx..0x%lx] "
1074 "for slot %d uses DevIO(%d)\n", pci_space[space],
1075 pci_addr, pci_addr + req_size - 1, slot, win));
1081 * Accesses to device decode
1082 * areas that do not fit
1083 * within the DevIO(x) space are
1084 * modified to be accesses via
1085 * the direct mapping areas.
1087 * If necessary, drivers can
1088 * explicitly ask for mappings
1089 * into these address spaces,
1090 * but this should never be needed.
1092 case PCIIO_SPACE_MEM: /* "mem space" */
1093 case PCIIO_SPACE_MEM32: /* "mem, use 32-bit-wide bus" */
1094 if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 0)) { /* PIC bus 0 */
1095 base = PICBRIDGE0_PCI_MEM32_BASE;
1096 limit = PICBRIDGE0_PCI_MEM32_LIMIT;
1097 } else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) { /* PIC bus 1 */
1098 base = PICBRIDGE1_PCI_MEM32_BASE;
1099 limit = PICBRIDGE1_PCI_MEM32_LIMIT;
/* NOTE(review): returning here without pcibr_unlock -- but the surrounding
 * (elided) lines may alter this path; verify against the full source before
 * drawing conclusions.
 */
1101 printk("pcibr_addr_pci_to_xio(): unknown bridge type");
1102 return (iopaddr_t)0;
1105 if ((pci_addr + base + req_size - 1) <= limit)
1106 xio_addr = pci_addr + base;
1109 case PCIIO_SPACE_MEM64: /* "mem, use 64-bit-wide bus" */
1110 if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 0)) { /* PIC bus 0 */
1111 base = PICBRIDGE0_PCI_MEM64_BASE;
1112 limit = PICBRIDGE0_PCI_MEM64_LIMIT;
1113 } else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) { /* PIC bus 1 */
1114 base = PICBRIDGE1_PCI_MEM64_BASE;
1115 limit = PICBRIDGE1_PCI_MEM64_LIMIT;
1117 printk("pcibr_addr_pci_to_xio(): unknown bridge type");
1118 return (iopaddr_t)0;
1121 if ((pci_addr + base + req_size - 1) <= limit)
1122 xio_addr = pci_addr + base;
1125 case PCIIO_SPACE_IO: /* "i/o space" */
1127 * PIC bridges do not support big-window aliases into PCI I/O space
1129 xio_addr = XIO_NOWHERE;
1133 /* Check that "Direct PIO" byteswapping matches,
1134 * try to change it if it does not.
1136 if (xio_addr != XIO_NOWHERE) {
1137 unsigned bst; /* nonzero to set bytestream */
1138 unsigned *bfp; /* addr of record of how swapper is set */
1139 uint64_t swb; /* which control bit to mung */
1140 unsigned bfo; /* current swapper setting */
1141 unsigned bfn; /* desired swapper setting */
1143 bfp = ((space == PCIIO_SPACE_IO)
1144 ? (&pcibr_soft->bs_pio_end_io)
1145 : (&pcibr_soft->bs_pio_end_mem));
1149 bst = flags & PCIIO_BYTE_STREAM;
1151 bfn = bst ? PCIIO_BYTE_STREAM : PCIIO_WORD_VALUES;
1153 if (bfn == bfo) { /* we already match. */
1155 } else if (bfo != 0) { /* we have a conflict. */
1156 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
1157 "pcibr_addr_pci_to_xio: swap conflict in %s, "
1158 "was%s%s, want%s%s\n", pci_space[space],
1159 bfo & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
1160 bfo & PCIIO_WORD_VALUES ? " WORD_VALUES" : "",
1161 bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
1162 bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : ""));
1163 xio_addr = XIO_NOWHERE;
1164 } else { /* OK to make the change. */
1165 swb = (space == PCIIO_SPACE_IO) ? 0: BRIDGE_CTRL_MEM_SWAP;
1167 pcireg_control_bit_set(pcibr_soft, swb);
1169 pcireg_control_bit_clr(pcibr_soft, swb);
1172 *bfp = bfn; /* record the assignment */
1174 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
1175 "pcibr_addr_pci_to_xio: swap for %s set to%s%s\n",
1177 bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
1178 bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : ""));
1182 pcibr_unlock(pcibr_soft, s);
/*
 * pcibr_piomap_alloc: allocate a PIO mapping resource so the caller
 * can do programmed I/O to 'req_size' bytes of PCI address space
 * 'space' starting at 'pci_addr' on the bus behind this bridge.
 *
 * An idle entry (bp_mapsz == 0) on the device's piomap list is
 * reused when available; otherwise a new pcibr_piomap is allocated,
 * zeroed, and linked onto the list.  Returns the map on success,
 * NULL on failure (zero-sized request, unreachable PCI address, or
 * allocation failure).
 */
1188 pcibr_piomap_alloc(vertex_hdl_t pconn_vhdl,
1189 device_desc_t dev_desc,
1190 pciio_space_t space,
1193 size_t req_size_max,
1196 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
1197 pciio_info_t pciio_info = &pcibr_info->f_c;
1198 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
1199 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
1200 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
1202 pcibr_piomap_t *mapptr;
1203 pcibr_piomap_t maplist;
1204 pcibr_piomap_t pcibr_piomap;
1206 xtalk_piomap_t xtalk_piomap;
1209 /* Make sure that the req sizes are non-zero */
1210 if ((req_size < 1) || (req_size_max < 1)) {
1211 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
1212 "pcibr_piomap_alloc: req_size | req_size_max < 1\n"));
1217 * Code to translate slot/space/addr
1218 * into xio_addr is common between
1219 * this routine and pcibr_piotrans_addr.
1221 xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
1223 if (xio_addr == XIO_NOWHERE) {
1224 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
1225 "pcibr_piomap_alloc: xio_addr == XIO_NOWHERE\n"));
1229 /* Check the piomap list to see if there is already an allocated
1230 * piomap entry but not in use. If so use that one. Otherwise
1231 * allocate a new piomap entry and add it to the piomap list
1233 mapptr = &(pcibr_info->f_piomap);
1235 s = pcibr_lock(pcibr_soft);
1236 for (pcibr_piomap = *mapptr;
1237 pcibr_piomap != NULL;
1238 pcibr_piomap = pcibr_piomap->bp_next) {
/* bp_mapsz == 0 marks an entry that is allocated but idle */
1239 if (pcibr_piomap->bp_mapsz == 0)
1246 pcibr_unlock(pcibr_soft, s);
1247 pcibr_piomap = kmalloc(sizeof (*(pcibr_piomap)), GFP_KERNEL);
1248 if ( !pcibr_piomap ) {
1249 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
1250 "pcibr_piomap_alloc: malloc fails\n"));
1253 memset(pcibr_piomap, 0, sizeof (*(pcibr_piomap)));
1256 pcibr_piomap->bp_dev = pconn_vhdl;
1257 pcibr_piomap->bp_slot = PCIBR_DEVICE_TO_SLOT(pcibr_soft, pciio_slot);
1258 pcibr_piomap->bp_flags = flags;
1259 pcibr_piomap->bp_space = space;
1260 pcibr_piomap->bp_pciaddr = pci_addr;
1261 pcibr_piomap->bp_mapsz = req_size;
1262 pcibr_piomap->bp_soft = pcibr_soft;
1263 pcibr_piomap->bp_toc = ATOMIC_INIT(0);
/* link the new entry at the head of the device's piomap list */
1266 s = pcibr_lock(pcibr_soft);
1268 pcibr_piomap->bp_next = maplist;
1269 *mapptr = pcibr_piomap;
1271 pcibr_unlock(pcibr_soft, s);
/* build the underlying crosstalk PIO map for the translated range */
1276 xtalk_piomap_alloc(xconn_vhdl, 0,
1278 req_size, req_size_max,
1279 flags & PIOMAP_FLAGS);
1281 pcibr_piomap->bp_xtalk_addr = xio_addr;
1282 pcibr_piomap->bp_xtalk_pio = xtalk_piomap;
/* presumably the failure path: mark the entry idle for reuse -- TODO confirm (branch lines elided) */
1284 pcibr_piomap->bp_mapsz = 0;
1289 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
1290 "pcibr_piomap_alloc: map=0x%lx\n", pcibr_piomap));
1292 return pcibr_piomap;
/*
 * pcibr_piomap_free: release the crosstalk resources behind a PIO
 * map and mark the entry idle (bp_mapsz = 0) so pcibr_piomap_alloc
 * can reuse it; the pcibr_piomap itself stays on the device's list.
 */
1297 pcibr_piomap_free(pcibr_piomap_t pcibr_piomap)
1299 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
1300 "pcibr_piomap_free: map=0x%lx\n", pcibr_piomap));
1302 xtalk_piomap_free(pcibr_piomap->bp_xtalk_pio);
1303 pcibr_piomap->bp_xtalk_pio = 0;
1304 pcibr_piomap->bp_mapsz = 0;
/*
 * pcibr_piomap_addr: produce a CPU-usable address for 'pci_addr'
 * within an established PIO map, by offsetting into the underlying
 * crosstalk PIO map (pci_addr - bp_pciaddr is the offset into the
 * mapped range).
 */
1309 pcibr_piomap_addr(pcibr_piomap_t pcibr_piomap,
1314 addr = xtalk_piomap_addr(pcibr_piomap->bp_xtalk_pio,
1315 pcibr_piomap->bp_xtalk_addr +
1316 pci_addr - pcibr_piomap->bp_pciaddr,
1318 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
1319 "pcibr_piomap_addr: map=0x%lx, addr=0x%lx\n",
1320 pcibr_piomap, addr));
/*
 * pcibr_piomap_done: signal the end of a PIO transaction on this map
 * by forwarding to the crosstalk layer's "done" hook.
 */
1327 pcibr_piomap_done(pcibr_piomap_t pcibr_piomap)
1329 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
1330 "pcibr_piomap_done: map=0x%lx\n", pcibr_piomap));
1331 xtalk_piomap_done(pcibr_piomap->bp_xtalk_pio);
/*
 * pcibr_piotrans_addr: translate slot/space/pci_addr directly into a
 * CPU-usable address using only the bridge's fixed PIO windows -- no
 * per-map resources are held.  Shares its PCI-to-XIO translation
 * (pcibr_addr_pci_to_xio) with pcibr_piomap_alloc; bails out when
 * the PCI address cannot be reached (XIO_NOWHERE).
 */
1336 pcibr_piotrans_addr(vertex_hdl_t pconn_vhdl,
1337 device_desc_t dev_desc,
1338 pciio_space_t space,
1343 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
1344 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
1345 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
1346 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
1351 xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
1353 if (xio_addr == XIO_NOWHERE) {
1354 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIODIR, pconn_vhdl,
1355 "pcibr_piotrans_addr: xio_addr == XIO_NOWHERE\n"));
/* hand the XIO address to the crosstalk provider for the final translation */
1359 addr = xtalk_piotrans_addr(xconn_vhdl, 0, xio_addr, req_size, flags & PIOMAP_FLAGS);
1360 PCIBR_DEBUG((PCIBR_DEBUG_PIODIR, pconn_vhdl,
1361 "pcibr_piotrans_addr: xio_addr=0x%lx, addr=0x%lx\n",
1367 * PIO Space allocation and management.
1368 * Allocate and Manage the PCI PIO space (mem and io space)
1369 * This routine is pretty simplistic at this time, and
1370 * does pretty trivial management of allocation and freeing.
1371 * The current scheme is prone to fragmentation.
1372 * Change the scheme to use bitmaps.
/*
 * pcibr_piospace_alloc: hand out a chunk of PCI bus address space
 * (IO or MEM/MEM32) of at least 'req_size' bytes, aligned to
 * 'alignment' (a power of two >= PAGE_SIZE).  A logically-freed
 * chunk of the right space/size/alignment on the device's
 * f_piospace list is reused when available; otherwise fresh bus
 * space is carved out via pcibr_bus_addr_alloc() and recorded on
 * the list.  Returns the starting PCI bus address.
 */
1377 pcibr_piospace_alloc(vertex_hdl_t pconn_vhdl,
1378 device_desc_t dev_desc,
1379 pciio_space_t space,
1383 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
1384 pciio_info_t pciio_info = &pcibr_info->f_c;
1385 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
1387 pciio_piospace_t piosp;
1390 iopaddr_t start_addr;
1394 * Check for proper alignment
1396 ASSERT(alignment >= PAGE_SIZE);
1397 ASSERT((alignment & (alignment - 1)) == 0);
1399 align_mask = alignment - 1;
1400 s = pcibr_lock(pcibr_soft);
1403 * First look if a previously allocated chunk exists.
1405 piosp = pcibr_info->f_piospace;
1408 * Look through the list for a right sized free chunk.
1412 (piosp->space == space) &&
1413 (piosp->count >= req_size) &&
1414 !(piosp->start & align_mask)) {
1416 pcibr_unlock(pcibr_soft, s);
1417 return piosp->start;
1419 piosp = piosp->next;
1425 * Allocate PCI bus address, usually for the Universe chip driver;
1426 * do not pass window info since the actual PCI bus address
1427 * space will never be freed. The space may be reused after it
1428 * is logically released by pcibr_piospace_free().
1431 case PCIIO_SPACE_IO:
1432 start_addr = pcibr_bus_addr_alloc(pcibr_soft, NULL,
1434 0, req_size, alignment);
1437 case PCIIO_SPACE_MEM:
1438 case PCIIO_SPACE_MEM32:
1439 start_addr = pcibr_bus_addr_alloc(pcibr_soft, NULL,
1441 0, req_size, alignment);
1446 pcibr_unlock(pcibr_soft, s);
1447 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
1448 "pcibr_piospace_alloc: unknown space %d\n", space));
1453 * If too big a request, reject it.
1456 pcibr_unlock(pcibr_soft, s);
/* NOTE(review): message typo -- "to big" should read "too big" */
1457 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
1458 "pcibr_piospace_alloc: request 0x%lx to big\n", req_size));
1462 piosp = kmalloc(sizeof (*(piosp)), GFP_KERNEL);
1464 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
1465 "pcibr_piospace_alloc: malloc fails\n"));
1468 memset(piosp, 0, sizeof (*(piosp)));
/* record the new chunk at the head of the device's piospace list */
1471 piosp->space = space;
1472 piosp->start = start_addr;
1473 piosp->count = req_size;
1474 piosp->next = pcibr_info->f_piospace;
1475 pcibr_info->f_piospace = piosp;
1477 pcibr_unlock(pcibr_soft, s);
1479 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
1480 "pcibr_piospace_alloc: piosp=0x%lx\n", piosp));
1485 #define ERR_MSG "!Device %s freeing size (0x%lx) different than allocated (0x%lx)"
/*
 * pcibr_piospace_free: logically release a chunk of PCI bus space
 * previously returned by pcibr_piospace_alloc.  Only whole chunks
 * can be freed; a size mismatch is reported via printk and the
 * recorded size is used instead.  The bus address space itself is
 * never returned to the allocator -- the chunk stays on the
 * f_piospace list so pcibr_piospace_alloc can reuse it.
 */
1488 pcibr_piospace_free(vertex_hdl_t pconn_vhdl,
1489 pciio_space_t space,
1493 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
1494 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
1495 pciio_piospace_t piosp;
1500 * Look through the bridge data structures for the pciio_piospace_t
1501 * structure corresponding to 'pciaddr'
1503 s = pcibr_lock(pcibr_soft);
1504 piosp = pcibr_info->f_piospace;
1507 * Piospace free can only be for the complete
1508 * chunk and not parts of it..
1510 if (piosp->start == pciaddr) {
1511 if (piosp->count == req_size)
1514 * Improper size passed for freeing..
1515 * Print a message and break;
1517 hwgraph_vertex_name_get(pconn_vhdl, name, 1024);
1518 printk(KERN_WARNING "pcibr_piospace_free: error");
1519 printk(KERN_WARNING "Device %s freeing size (0x%lx) different than allocated (0x%lx)",
1520 name, req_size, piosp->count);
1521 printk(KERN_WARNING "Freeing 0x%lx instead", piosp->count);
1524 piosp = piosp->next;
/* fell off the end of the list: no chunk starts at 'pciaddr' */
1529 "pcibr_piospace_free: Address 0x%lx size 0x%lx - No match\n",
1531 pcibr_unlock(pcibr_soft, s);
1535 pcibr_unlock(pcibr_soft, s);
1537 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
1538 "pcibr_piospace_free: piosp=0x%lx\n", piosp));
1542 /* =====================================================================
1545 * The Bridge ASIC provides three methods of doing
1546 * DMA: via a "direct map" register available in
1547 * 32-bit PCI space (which selects a contiguous 2G
1548 * address space on some other widget), via
1549 * "direct" addressing via 64-bit PCI space (all
1550 * destination information comes from the PCI
1551 * address, including transfer attributes), and via
1552 * a "mapped" region that allows a bunch of
1553 * different small mappings to be established with
1556 * For efficiency, we most prefer to use the 32-bit
1557 * direct mapping facility, since it requires no
1558 * resource allocations. The advantage of using the
1559 * PMU over the 64-bit direct is that single-cycle
1560 * PCI addressing can be used; the advantage of
1561 * using 64-bit direct over PMU addressing is that
1562 * we do not have to allocate entries in the PMU.
1566 * Convert PCI-generic software flags and Bridge-specific software flags
1567 * into Bridge-specific Direct Map attribute bits.
/*
 * pcibr_flags_to_d64: convert PCI-generic (PCIIO_*) and
 * bridge-specific (PCIBR_*) software flags into the attribute bits
 * carried in the upper part of a 64-bit direct-mapped PCI DMA
 * address: barrier (BAR), prefetch (PREF), precise (PREC), byte
 * swap (SWAP) and virtual channel (VIRTUAL).  Later checks override
 * earlier ones, so the provider-specific PCIBR_* flags win over the
 * generic PCIIO_* ones.  In PCI-X mode only barrier and swap
 * survive.
 */
1570 pcibr_flags_to_d64(unsigned flags, pcibr_soft_t pcibr_soft)
1572 iopaddr_t attributes = 0;
1574 /* Sanity check: Bridge only allows use of VCHAN1 via 64-bit addrs */
1576 ASSERT_ALWAYS(!(flags & PCIBR_VCHAN1) || (flags & PCIIO_DMA_A64));
1579 /* Generic macro flags
1581 if (flags & PCIIO_DMA_DATA) { /* standard data channel */
1582 attributes &= ~PCI64_ATTR_BAR; /* no barrier bit */
1583 attributes |= PCI64_ATTR_PREF; /* prefetch on */
1585 if (flags & PCIIO_DMA_CMD) { /* standard command channel */
1586 attributes |= PCI64_ATTR_BAR; /* barrier bit on */
1587 attributes &= ~PCI64_ATTR_PREF; /* disable prefetch */
1589 /* Generic detail flags
1591 if (flags & PCIIO_PREFETCH)
1592 attributes |= PCI64_ATTR_PREF;
1593 if (flags & PCIIO_NOPREFETCH)
1594 attributes &= ~PCI64_ATTR_PREF;
1596 /* the swap bit is in the address attributes for xbridge */
1597 if (flags & PCIIO_BYTE_STREAM)
1598 attributes |= PCI64_ATTR_SWAP;
1599 if (flags & PCIIO_WORD_VALUES)
1600 attributes &= ~PCI64_ATTR_SWAP;
1602 /* Provider-specific flags
1604 if (flags & PCIBR_BARRIER)
1605 attributes |= PCI64_ATTR_BAR;
1606 if (flags & PCIBR_NOBARRIER)
1607 attributes &= ~PCI64_ATTR_BAR;
1609 if (flags & PCIBR_PREFETCH)
1610 attributes |= PCI64_ATTR_PREF;
1611 if (flags & PCIBR_NOPREFETCH)
1612 attributes &= ~PCI64_ATTR_PREF;
1614 if (flags & PCIBR_PRECISE)
1615 attributes |= PCI64_ATTR_PREC;
1616 if (flags & PCIBR_NOPRECISE)
1617 attributes &= ~PCI64_ATTR_PREC;
1619 if (flags & PCIBR_VCHAN1)
1620 attributes |= PCI64_ATTR_VIRTUAL;
1621 if (flags & PCIBR_VCHAN0)
1622 attributes &= ~PCI64_ATTR_VIRTUAL;
1624 /* PIC in PCI-X mode only supports barrier & swap */
1625 if (IS_PCIX(pcibr_soft)) {
1626 attributes &= (PCI64_ATTR_BAR | PCI64_ATTR_SWAP);
/*
 * pcibr_dmamap_alloc: allocate the bridge resources needed for DMA
 * of up to 'req_size_max' bytes on behalf of the device at
 * pconn_vhdl.  A pre-allocated pcibr_dmamap is taken from the pool
 * (safe in interrupt context), a crosstalk dmamap is layered under
 * it, and then one of three addressing schemes is chosen, in order
 * of preference:
 *   1. 64-bit direct (PCIIO_DMA_A64) -- attributes encoded in the
 *      address, no mapping RAM consumed;
 *   2. 32-bit direct (PCIIO_FIXED) -- through the DIR_OFF window;
 *   3. page-mapped -- ATEs allocated from the bridge mapping RAM.
 * Extra RRBs (read response buffers) are topped up for the slot when
 * the transfer is prefetching.  Returns the map, or fails (return
 * lines elided) after freeing everything acquired.
 */
1634 pcibr_dmamap_alloc(vertex_hdl_t pconn_vhdl,
1635 device_desc_t dev_desc,
1636 size_t req_size_max,
1639 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
1640 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
1641 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
1643 xwidgetnum_t xio_port;
1645 xtalk_dmamap_t xtalk_dmamap;
1646 pcibr_dmamap_t pcibr_dmamap;
1652 /* merge in forced flags */
1653 flags |= pcibr_soft->bs_dma_flags;
1656 * On SNIA64, these maps are pre-allocated because pcibr_dmamap_alloc()
1657 * can be called within an interrupt thread.
1659 s = pcibr_lock(pcibr_soft);
1660 pcibr_dmamap = (pcibr_dmamap_t)get_free_pciio_dmamap(pcibr_soft->bs_vhdl);
1661 pcibr_unlock(pcibr_soft, s);
1666 xtalk_dmamap = xtalk_dmamap_alloc(xconn_vhdl, dev_desc, req_size_max,
1667 flags & DMAMAP_FLAGS);
1668 if (!xtalk_dmamap) {
1669 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
1670 "pcibr_dmamap_alloc: xtalk_dmamap_alloc failed\n"));
1671 free_pciio_dmamap(pcibr_dmamap);
1674 xio_port = pcibr_soft->bs_mxid;
1675 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
/* fill in the per-map bookkeeping before picking a scheme */
1677 pcibr_dmamap->bd_dev = pconn_vhdl;
1678 pcibr_dmamap->bd_slot = PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot);
1679 pcibr_dmamap->bd_soft = pcibr_soft;
1680 pcibr_dmamap->bd_xtalk = xtalk_dmamap;
1681 pcibr_dmamap->bd_max_size = req_size_max;
1682 pcibr_dmamap->bd_xio_port = xio_port;
/* scheme 1: 64-bit direct mapping */
1684 if (flags & PCIIO_DMA_A64) {
1685 if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D64_BITS)) {
1690 /* Device is capable of A64 operations,
1691 * and the attributes of the DMA are
1692 * consistent with any previous DMA
1693 * mappings using shared resources.
1696 pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
1698 pcibr_dmamap->bd_flags = flags;
1699 pcibr_dmamap->bd_xio_addr = 0;
1700 pcibr_dmamap->bd_pci_addr = pci_addr;
1702 /* If in PCI mode, make sure we have an RRB (or two).
1704 if (IS_PCI(pcibr_soft) &&
1705 !(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
1706 if (flags & PCIBR_VCHAN1)
1708 have_rrbs = pcibr_soft->bs_rrb_valid[slot][vchan];
1709 if (have_rrbs < 2) {
1710 if (pci_addr & PCI64_ATTR_PREF)
1714 if (have_rrbs < min_rrbs)
1715 pcibr_rrb_alloc_more(pcibr_soft, slot, vchan,
1716 min_rrbs - have_rrbs);
1719 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
1720 "pcibr_dmamap_alloc: using direct64, map=0x%lx\n",
1722 return pcibr_dmamap;
1724 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
1725 "pcibr_dmamap_alloc: unable to use direct64\n"));
1727 /* PIC in PCI-X mode only supports 64-bit direct mapping so
1728 * don't fall thru and try 32-bit direct mapping or 32-bit
1731 if (IS_PCIX(pcibr_soft)) {
/* NOTE(review): other failure paths use free_pciio_dmamap() (pool
 * return) and also free the xtalk_dmamap; a bare kfree() here looks
 * inconsistent -- verify against the elided lines. */
1732 kfree(pcibr_dmamap);
/* give up on 64-bit direct and fall through to the 32-bit schemes */
1736 flags &= ~PCIIO_DMA_A64;
/* scheme 2: 32-bit direct mapping through the DIR_OFF window */
1738 if (flags & PCIIO_FIXED) {
1739 /* warning: mappings may fail later,
1740 * if direct32 can't get to the address.
1742 if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D32_BITS)) {
1743 /* User desires DIRECT A32 operations,
1744 * and the attributes of the DMA are
1745 * consistent with any previous DMA
1746 * mappings using shared resources.
1747 * Mapping calls may fail if target
1748 * is outside the direct32 range.
1750 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
1751 "pcibr_dmamap_alloc: using direct32, map=0x%lx\n",
1753 pcibr_dmamap->bd_flags = flags;
1754 pcibr_dmamap->bd_xio_addr = pcibr_soft->bs_dir_xbase;
1755 pcibr_dmamap->bd_pci_addr = PCI32_DIRECT_BASE;
1756 return pcibr_dmamap;
1758 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
1759 "pcibr_dmamap_alloc: unable to use direct32\n"));
1761 /* If the user demands FIXED and we can't
1762 * give it to him, fail.
1764 xtalk_dmamap_free(xtalk_dmamap);
1765 free_pciio_dmamap(pcibr_dmamap);
/* scheme 3: page-mapped DMA through ATEs */
1769 * Allocate Address Translation Entries from the mapping RAM.
1770 * Unless the PCIBR_NO_ATE_ROUNDUP flag is specified,
1771 * the maximum number of ATEs is based on the worst-case
1772 * scenario, where the requested target is in the
1773 * last byte of an ATE; thus, mapping IOPGSIZE+2
1774 * does end up requiring three ATEs.
1776 if (!(flags & PCIBR_NO_ATE_ROUNDUP)) {
1777 ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */
1778 +req_size_max /* max mapping bytes */
1779 - 1) + 1; /* round UP */
1780 } else { /* assume requested target is page aligned */
1781 ate_count = IOPG(req_size_max /* max mapping bytes */
1782 - 1) + 1; /* round UP */
1785 ate_index = pcibr_ate_alloc(pcibr_soft, ate_count, &pcibr_dmamap->resource);
1787 if (ate_index != -1) {
1788 if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_PMU_BITS)) {
1789 bridge_ate_t ate_proto;
1793 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
1794 "pcibr_dmamap_alloc: using PMU, ate_index=%d, "
1795 "pcibr_dmamap=0x%lx\n", ate_index, pcibr_dmamap));
1797 ate_proto = pcibr_flags_to_ate(pcibr_soft, flags);
1799 pcibr_dmamap->bd_flags = flags;
1800 pcibr_dmamap->bd_pci_addr =
1801 PCI32_MAPPED_BASE + IOPGSIZE * ate_index;
1803 if (flags & PCIIO_BYTE_STREAM)
1804 ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
1806 * If swap was set in bss_device in pcibr_endian_set()
1807 * we need to change the address bit.
1809 if (pcibr_soft->bs_slot[slot].bss_device &
1810 BRIDGE_DEV_SWAP_PMU)
1811 ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
1812 if (flags & PCIIO_WORD_VALUES)
1813 ATE_SWAP_OFF(pcibr_dmamap->bd_pci_addr);
1814 pcibr_dmamap->bd_xio_addr = 0;
1815 pcibr_dmamap->bd_ate_ptr = pcibr_ate_addr(pcibr_soft, ate_index);
1816 pcibr_dmamap->bd_ate_index = ate_index;
1817 pcibr_dmamap->bd_ate_count = ate_count;
1818 pcibr_dmamap->bd_ate_proto = ate_proto;
1820 /* Make sure we have an RRB (or two).
1822 if (!(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
1823 have_rrbs = pcibr_soft->bs_rrb_valid[slot][vchan];
1824 if (have_rrbs < 2) {
1825 if (ate_proto & ATE_PREF)
1829 if (have_rrbs < min_rrbs)
1830 pcibr_rrb_alloc_more(pcibr_soft, slot, vchan,
1831 min_rrbs - have_rrbs);
1834 return pcibr_dmamap;
1836 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
1837 "pcibr_dmamap_alloc: PMU use failed, ate_index=%d\n",
1840 pcibr_ate_free(pcibr_soft, ate_index, ate_count, &pcibr_dmamap->resource);
1842 /* total failure: sorry, you just can't
1843 * get from here to there that way.
1845 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
1846 "pcibr_dmamap_alloc: complete failure.\n"));
1847 xtalk_dmamap_free(xtalk_dmamap);
1848 free_pciio_dmamap(pcibr_dmamap);
/*
 * pcibr_dmamap_free: undo pcibr_dmamap_alloc -- free the crosstalk
 * map, release the per-slot Device(x) resource claims (D64 and/or
 * PMU), free any ATEs, and return the map to the pre-allocated pool.
 */
1854 pcibr_dmamap_free(pcibr_dmamap_t pcibr_dmamap)
1856 pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
1857 pciio_slot_t slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft,
1858 pcibr_dmamap->bd_slot);
1860 xtalk_dmamap_free(pcibr_dmamap->bd_xtalk);
1862 if (pcibr_dmamap->bd_flags & PCIIO_DMA_A64) {
1863 pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_D64_BITS);
1865 if (pcibr_dmamap->bd_ate_count) {
1866 pcibr_ate_free(pcibr_dmamap->bd_soft,
1867 pcibr_dmamap->bd_ate_index,
1868 pcibr_dmamap->bd_ate_count,
1869 &pcibr_dmamap->resource);
/* NOTE(review): alloc claimed BRIDGE_DEV_PMU_BITS but this releases
 * XBRIDGE_DEV_PMU_BITS -- confirm the two masks are equivalent here. */
1870 pcibr_release_device(pcibr_soft, slot, XBRIDGE_DEV_PMU_BITS);
1873 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
1874 "pcibr_dmamap_free: pcibr_dmamap=0x%lx\n", pcibr_dmamap));
1876 free_pciio_dmamap(pcibr_dmamap);
1880 * pcibr_addr_xio_to_pci: given a PIO range, hand
1881 * back the corresponding base PCI MEM address;
1882 * this is used to short-circuit DMA requests that
1883 * loop back onto this PCI bus.
/*
 * pcibr_addr_xio_to_pci: given a XIO (crosstalk) address range that
 * loops back onto this bridge, return the PCI MEM address it
 * corresponds to; used to short-circuit DMA that targets this same
 * PCI bus.  The big per-bus MEM32/MEM64 windows are checked first,
 * then the per-slot DEVIO windows.  Returns 0 for an unknown bridge
 * type and PCI_NOWHERE when a DEVIO hit is not mapped to PCI MEM.
 */
1886 pcibr_addr_xio_to_pci(pcibr_soft_t soft,
1890 iopaddr_t xio_lim = xio_addr + req_size - 1;
1894 if (IS_PIC_BUSNUM_SOFT(soft, 0)) {
1895 if ((xio_addr >= PICBRIDGE0_PCI_MEM32_BASE) &&
1896 (xio_lim <= PICBRIDGE0_PCI_MEM32_LIMIT)) {
1897 pci_addr = xio_addr - PICBRIDGE0_PCI_MEM32_BASE;
1900 if ((xio_addr >= PICBRIDGE0_PCI_MEM64_BASE) &&
1901 (xio_lim <= PICBRIDGE0_PCI_MEM64_LIMIT)) {
1902 pci_addr = xio_addr - PICBRIDGE0_PCI_MEM64_BASE;
1905 } else if (IS_PIC_BUSNUM_SOFT(soft, 1)) {
1906 if ((xio_addr >= PICBRIDGE1_PCI_MEM32_BASE) &&
1907 (xio_lim <= PICBRIDGE1_PCI_MEM32_LIMIT)) {
1908 pci_addr = xio_addr - PICBRIDGE1_PCI_MEM32_BASE;
1911 if ((xio_addr >= PICBRIDGE1_PCI_MEM64_BASE) &&
1912 (xio_lim <= PICBRIDGE1_PCI_MEM64_LIMIT)) {
1913 pci_addr = xio_addr - PICBRIDGE1_PCI_MEM64_BASE;
1917 printk("pcibr_addr_xio_to_pci(): unknown bridge type");
1918 return (iopaddr_t)0;
/* no big-window hit: search the per-slot DEVIO windows */
1920 for (slot = soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(soft); ++slot)
1921 if ((xio_addr >= PCIBR_BRIDGE_DEVIO(soft, slot)) &&
1922 (xio_lim < PCIBR_BRIDGE_DEVIO(soft, slot + 1))) {
/* rebuild the PCI address from the slot's Device(x) offset field */
1925 dev = soft->bs_slot[slot].bss_device;
1926 pci_addr = dev & BRIDGE_DEV_OFF_MASK;
1927 pci_addr <<= BRIDGE_DEV_OFF_ADDR_SHFT;
1928 pci_addr += xio_addr - PCIBR_BRIDGE_DEVIO(soft, slot);
1929 return (dev & BRIDGE_DEV_DEV_IO_MEM) ? pci_addr : PCI_NOWHERE;
/*
 * pcibr_dmamap_addr: establish the DMA mapping allocated by
 * pcibr_dmamap_alloc for a concrete physical range [paddr,
 * paddr+req_size) and return the PCI bus address the device should
 * use.  The scheme recorded in the map's flags is honored:
 *   - loopback to this bridge -> translate the XIO address back to
 *     a PCI MEM address;
 *   - 64-bit direct (DMA_A64) -> merge target widget and attributes
 *     into the address;
 *   - 32-bit direct (FIXED)   -> bounds-check against the direct
 *     window, 0 on a miss;
 *   - otherwise (PMU)         -> write ATEs into the mapping RAM.
 */
1936 pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
1940 pcibr_soft_t pcibr_soft;
1942 xwidgetnum_t xio_port;
1946 ASSERT(pcibr_dmamap != NULL);
1947 ASSERT(req_size > 0);
1948 ASSERT(req_size <= pcibr_dmamap->bd_max_size);
1950 pcibr_soft = pcibr_dmamap->bd_soft;
1952 flags = pcibr_dmamap->bd_flags;
1954 xio_addr = xtalk_dmamap_addr(pcibr_dmamap->bd_xtalk, paddr, req_size);
1955 if (XIO_PACKED(xio_addr)) {
1956 xio_port = XIO_PORT(xio_addr);
1957 xio_addr = XIO_ADDR(xio_addr);
1959 xio_port = pcibr_dmamap->bd_xio_port;
1961 /* If this DMA is to an address that
1962 * refers back to this Bridge chip,
1963 * reduce it back to the correct
1966 if (xio_port == pcibr_soft->bs_xid) {
1967 pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
1968 } else if (flags & PCIIO_DMA_A64) {
1970 * always use 64-bit direct mapping,
1971 * which always works.
1972 * Device(x) was set up during
1973 * dmamap allocation.
1976 /* attributes are already bundled up into bd_pci_addr.
1978 pci_addr = pcibr_dmamap->bd_pci_addr
1979 | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT)
1982 /* Bridge Hardware WAR #482836:
1983 * If the transfer is not cache aligned
1984 * and the Bridge Rev is <= B, force
1985 * prefetch to be off.
1987 if (flags & PCIBR_NOPREFETCH)
1988 pci_addr &= ~PCI64_ATTR_PREF;
1990 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR,
1991 pcibr_dmamap->bd_dev,
1992 "pcibr_dmamap_addr: (direct64): wanted paddr [0x%lx..0x%lx] "
1993 "XIO port 0x%x offset 0x%lx, returning PCI 0x%lx\n",
1994 paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));
1996 } else if (flags & PCIIO_FIXED) {
1998 * always use 32-bit direct mapping,
2000 * Device(x) was set up during
2001 * dmamap allocation.
2004 if (xio_port != pcibr_soft->bs_dir_xport)
2005 pci_addr = 0; /* wrong DIDN */
2006 else if (xio_addr < pcibr_dmamap->bd_xio_addr)
2007 pci_addr = 0; /* out of range */
2008 else if ((xio_addr + req_size) >
2009 (pcibr_dmamap->bd_xio_addr + BRIDGE_DMA_DIRECT_SIZE))
2010 pci_addr = 0; /* out of range */
2012 pci_addr = pcibr_dmamap->bd_pci_addr +
2013 xio_addr - pcibr_dmamap->bd_xio_addr;
2015 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR,
2016 pcibr_dmamap->bd_dev,
2017 "pcibr_dmamap_addr (direct32): wanted paddr [0x%lx..0x%lx] "
2018 "XIO port 0x%x offset 0x%lx, returning PCI 0x%lx\n",
2019 paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));
/* PMU (page-mapped) case: program the allocated ATEs */
2022 iopaddr_t offset = IOPGOFF(xio_addr);
2023 bridge_ate_t ate_proto = pcibr_dmamap->bd_ate_proto;
2024 int ate_count = IOPG(offset + req_size - 1) + 1;
2025 int ate_index = pcibr_dmamap->bd_ate_index;
/* ATE holds the page-aligned XIO address plus the target widget id */
2028 ate = ate_proto | (xio_addr - offset);
2029 ate |= (xio_port << ATE_TIDSHIFT);
2031 pci_addr = pcibr_dmamap->bd_pci_addr + offset;
2033 /* Fill in our mapping registers
2034 * with the appropriate xtalk data,
2035 * and hand back the PCI address.
2038 ASSERT(ate_count > 0);
2039 if (ate_count <= pcibr_dmamap->bd_ate_count) {
2040 ate_write(pcibr_soft, ate_index, ate_count, ate);
2042 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
2043 "pcibr_dmamap_addr (PMU) : wanted paddr "
2044 "[0x%lx..0x%lx] returning PCI 0x%lx\n",
2045 paddr, paddr + req_size - 1, pci_addr));
2048 /* The number of ATE's required is greater than the number
2049 * allocated for this map. One way this can happen is if
2050 * pcibr_dmamap_alloc() was called with the PCIBR_NO_ATE_ROUNDUP
2051 * flag, and then when that map is used (right now), the
2052 * target address tells us we really did need to roundup.
2053 * The other possibility is that the map is just plain too
2054 * small to handle the requested target area.
2056 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
2057 "pcibr_dmamap_addr (PMU) : wanted paddr "
2058 "[0x%lx..0x%lx] ate_count 0x%x bd_ate_count 0x%x "
2059 "ATE's required > number allocated\n",
2060 paddr, paddr + req_size - 1,
2061 ate_count, pcibr_dmamap->bd_ate_count));
/*
 * pcibr_dmamap_done: signal the end of a DMA transaction on this map
 * by forwarding to the crosstalk layer's "done" hook.
 */
2071 pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
2073 xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
2075 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
2076 "pcibr_dmamap_done: pcibr_dmamap=0x%lx\n", pcibr_dmamap));
2081 * For each bridge, the DIR_OFF value in the Direct Mapping Register
2082 * determines the PCI to Crosstalk memory mapping to be used for all
2083 * 32-bit Direct Mapping memory accesses. This mapping can be to any
2084 * node in the system. This function will return that compact node id.
/*
 * pcibr_get_dmatrans_node: return the compact node id of the node
 * targeted by this bridge's 32-bit direct map, derived from the
 * NASID encoded in bs_dir_xbase (the DIR_OFF base).
 */
2089 pcibr_get_dmatrans_node(vertex_hdl_t pconn_vhdl)
2092 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
2093 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2095 return nasid_to_cnodeid(NASID_GET(pcibr_soft->bs_dir_xbase));
/*
 * pcibr_dmatrans_addr: translate a physical range for DMA without
 * holding a pcibr_dmamap -- the "direct" path.  After resolving the
 * target XIO port/address, tries in order:
 *   - loopback: range targets this bridge -> translate back to PCI;
 *   - 64-bit direct, when the caller allows PCIIO_DMA_A64 and the
 *     slot's Device(x) bits can be made consistent with 'flags';
 *   - the 2 GB 32-bit direct window rooted at bs_dir_xbase.
 * The per-slot direct64/direct32 flags and base addresses are cached
 * in the slot soft structure (bss_d64_*/bss_d32_*) so repeat calls
 * with matching flags avoid reprogramming Device(x).  In PCI-X mode
 * only the 64-bit direct scheme is attempted.
 */
2100 pcibr_dmatrans_addr(vertex_hdl_t pconn_vhdl,
2101 device_desc_t dev_desc,
2106 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
2107 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2108 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
2109 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2110 pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[pciio_slot];
2112 xwidgetnum_t xio_port;
2120 /* merge in forced flags */
2121 flags |= pcibr_soft->bs_dma_flags;
2123 xio_addr = xtalk_dmatrans_addr(xconn_vhdl, 0, paddr, req_size,
2124 flags & DMAMAP_FLAGS);
2126 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMADIR, pconn_vhdl,
2127 "pcibr_dmatrans_addr: wanted paddr [0x%lx..0x%lx], "
2128 "xtalk_dmatrans_addr failed with 0x%lx\n",
2129 paddr, paddr + req_size - 1, xio_addr));
2133 * find which XIO port this goes to.
2135 if (XIO_PACKED(xio_addr)) {
2136 if (xio_addr == XIO_NOWHERE) {
2137 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMADIR, pconn_vhdl,
2138 "pcibr_dmatrans_addr: wanted paddr [0x%lx..0x%lx], "
2139 "xtalk_dmatrans_addr failed with XIO_NOWHERE\n",
2140 paddr, paddr + req_size - 1));
2143 xio_port = XIO_PORT(xio_addr);
2144 xio_addr = XIO_ADDR(xio_addr);
2147 xio_port = pcibr_soft->bs_mxid;
2150 * If this DMA comes back to us,
2151 * return the PCI MEM address on
2152 * which it would land, or NULL
2153 * if the target is something
2154 * on bridge other than PCI MEM.
2156 if (xio_port == pcibr_soft->bs_xid) {
2157 pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
2158 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
2159 "pcibr_dmatrans_addr: wanted paddr [0x%lx..0x%lx], "
2160 "xio_port=0x%x, pci_addr=0x%lx\n",
2161 paddr, paddr + req_size - 1, xio_port, pci_addr));
2164 /* If the caller can use A64, try to
2165 * satisfy the request with the 64-bit
2166 * direct map. This can fail if the
2167 * configuration bits in Device(x)
2168 * conflict with our flags.
2171 if (flags & PCIIO_DMA_A64) {
2172 pci_addr = slotp->bss_d64_base;
2173 if (!(flags & PCIBR_VCHAN1))
2174 flags |= PCIBR_VCHAN0;
/* fast path: slot already programmed with exactly these flags */
2175 if ((pci_addr != PCIBR_D64_BASE_UNSET) &&
2176 (flags == slotp->bss_d64_flags)) {
2178 pci_addr |= xio_addr |
2179 ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
2180 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
2181 "pcibr_dmatrans_addr: wanted paddr [0x%lx..0x%lx], "
2182 "xio_port=0x%x, direct64: pci_addr=0x%lx\n",
2183 paddr, paddr + req_size - 1, xio_addr, pci_addr));
/* slow path: claim the D64 Device(x) bits and cache the new base */
2186 if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS)) {
2187 pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
2188 slotp->bss_d64_flags = flags;
2189 slotp->bss_d64_base = pci_addr;
2190 pci_addr |= xio_addr
2191 | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);
2193 /* If in PCI mode, make sure we have an RRB (or two).
2195 if (IS_PCI(pcibr_soft) &&
2196 !(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
2197 if (flags & PCIBR_VCHAN1)
2199 have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot][vchan];
2200 if (have_rrbs < 2) {
2201 if (pci_addr & PCI64_ATTR_PREF)
2205 if (have_rrbs < min_rrbs)
2206 pcibr_rrb_alloc_more(pcibr_soft, pciio_slot, vchan,
2207 min_rrbs - have_rrbs);
2210 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
2211 "pcibr_dmatrans_addr: wanted paddr [0x%lx..0x%lx], "
2212 "xio_port=0x%x, direct64: pci_addr=0x%lx, "
2213 "new flags: 0x%x\n", paddr, paddr + req_size - 1,
2214 xio_addr, pci_addr, (uint64_t) flags));
2218 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
2219 "pcibr_dmatrans_addr: wanted paddr [0x%lx..0x%lx], "
2220 "xio_port=0x%x, Unable to set direct64 Device(x) bits\n",
2221 paddr, paddr + req_size - 1, xio_addr));
2223 /* PIC only supports 64-bit direct mapping in PCI-X mode */
2224 if (IS_PCIX(pcibr_soft)) {
2228 /* our flags conflict with Device(x). try direct32*/
2229 flags = flags & ~(PCIIO_DMA_A64 | PCIBR_VCHAN0);
2231 /* BUS in PCI-X mode only supports 64-bit direct mapping */
2232 if (IS_PCIX(pcibr_soft)) {
2236 /* Try to satisfy the request with the 32-bit direct
2237 * map. This can fail if the configuration bits in
2238 * Device(x) conflict with our flags, or if the
2239 * target address is outside where DIR_OFF points.
2242 size_t map_size = 1ULL << 31;
2243 iopaddr_t xio_base = pcibr_soft->bs_dir_xbase;
2244 iopaddr_t offset = xio_addr - xio_base;
2245 iopaddr_t endoff = req_size + offset;
2247 if ((req_size > map_size) ||
2248 (xio_addr < xio_base) ||
2249 (xio_port != pcibr_soft->bs_dir_xport) ||
2250 (endoff > map_size)) {
2252 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
2253 "pcibr_dmatrans_addr: wanted paddr [0x%lx..0x%lx], "
2254 "xio_port=0x%x, xio region outside direct32 target\n",
2255 paddr, paddr + req_size - 1, xio_addr));
/* fast path: slot already set up for direct32 with these flags */
2257 pci_addr = slotp->bss_d32_base;
2258 if ((pci_addr != PCIBR_D32_BASE_UNSET) &&
2259 (flags == slotp->bss_d32_flags)) {
2263 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
2264 "pcibr_dmatrans_addr: wanted paddr [0x%lx..0x%lx],"
2265 " xio_port=0x%x, direct32: pci_addr=0x%lx\n",
2266 paddr, paddr + req_size - 1, xio_addr, pci_addr));
/* slow path: claim the D32 Device(x) bits and cache the base */
2270 if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D32_BITS)) {
2272 pci_addr = PCI32_DIRECT_BASE;
2273 slotp->bss_d32_flags = flags;
2274 slotp->bss_d32_base = pci_addr;
2277 /* Make sure we have an RRB (or two).
2279 if (!(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
2280 have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot][vchan];
2281 if (have_rrbs < 2) {
2282 if (slotp->bss_device & BRIDGE_DEV_PREF)
2286 if (have_rrbs < min_rrbs)
2287 pcibr_rrb_alloc_more(pcibr_soft, pciio_slot,
2288 vchan, min_rrbs - have_rrbs);
2291 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
2292 "pcibr_dmatrans_addr: wanted paddr [0x%lx..0x%lx],"
2293 " xio_port=0x%x, direct32: pci_addr=0x%lx, "
2294 "new flags: 0x%x\n", paddr, paddr + req_size - 1,
2295 xio_addr, pci_addr, (uint64_t) flags));
2299 /* our flags conflict with Device(x).
2301 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
2302 "pcibr_dmatrans_addr: wanted paddr [0x%lx..0x%lx], "
2303 "xio_port=0x%x, Unable to set direct32 Device(x) bits\n",
2304 paddr, paddr + req_size - 1, xio_port));
2308 PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
2309 "pcibr_dmatrans_addr: wanted paddr [0x%lx..0x%lx], "
2310 "xio_port=0x%x, No acceptable PCI address found\n",
2311 paddr, paddr + req_size - 1, xio_port));
/*
 * pcibr_dmamap_drain: drain outstanding DMA for this map by
 * forwarding to the crosstalk layer.
 */
2317 pcibr_dmamap_drain(pcibr_dmamap_t map)
2319 xtalk_dmamap_drain(map->bd_xtalk);
/*
 * pcibr_dmaaddr_drain: drain outstanding DMA for a raw physical
 * address range (no map held) by forwarding to the crosstalk layer
 * on the bridge's connection point.
 */
2323 pcibr_dmaaddr_drain(vertex_hdl_t pconn_vhdl,
2327 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
2328 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2329 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
2331 xtalk_dmaaddr_drain(xconn_vhdl, paddr, bytes);
2335 * Get the starting PCIbus address out of the given DMA map.
2336 * This function is supposed to be used by a close friend of PCI bridge
2337 * since it relies on the fact that the starting address of the map is fixed at
2338 * the allocation time in the current implementation of PCI bridge.
/*
 * pcibr_dmamap_pciaddr_get: return the starting PCI bus address of a
 * DMA map; valid because the start is fixed at allocation time (see
 * the comment above).
 */
2341 pcibr_dmamap_pciaddr_get(pcibr_dmamap_t pcibr_dmamap)
2343 return pcibr_dmamap->bd_pci_addr;
2346 /* =====================================================================
2347 * CONFIGURATION MANAGEMENT
/* Startup entry point for the pcibr provider. */
2351 pcibr_provider_startup(vertex_hdl_t pcibr)
/* Shutdown entry point for the pcibr provider. */
2357 pcibr_provider_shutdown(vertex_hdl_t pcibr)
/* Reset entry point for the bridge at 'conn'. */
2362 pcibr_reset(vertex_hdl_t conn)
/*
 * pcibr_endian_set: configure per-slot hardware byte swapping so
 * data arrives in the caller's desired endianness.  If the device's
 * native endianness differs from the desired one, the Device(x)
 * swap bits are set; otherwise they are cleared.  The register is
 * only rewritten (and flushed) when its value actually changes.
 */
2369 pcibr_endian_set(vertex_hdl_t pconn_vhdl,
2370 pciio_endian_t device_end,
2371 pciio_endian_t desired_end)
2373 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
2374 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2375 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2380 * Bridge supports hardware swapping; so we can always
2381 * arrange for the caller's desired endianness.
2384 s = pcibr_lock(pcibr_soft);
2385 devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
2386 if (device_end != desired_end)
2387 devreg |= BRIDGE_DEV_SWAP_BITS;
2389 devreg &= ~BRIDGE_DEV_SWAP_BITS;
2391 /* NOTE- if we ever put SWAP bits
2392 * onto the disabled list, we will
2393 * have to change the logic here.
2395 if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
2396 pcireg_device_set(pcibr_soft, pciio_slot, devreg);
2397 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
/* NOTE(review): looks like a read-back to flush the posted register
 * write to hardware -- confirm against pcireg_tflush_get(). */
2398 pcireg_tflush_get(pcibr_soft);
2400 pcibr_unlock(pcibr_soft, s);
2402 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
2403 "pcibr_endian_set: Device(%d): 0x%x\n",
2404 pciio_slot, devreg));
2410 * Interfaces to allow special (e.g. SGI) drivers to set/clear
2411 * Bridge-specific device flags. Many flags are modified through
2412 * PCI-generic interfaces; we don't allow them to be directly
2413 * manipulated here. Only flags that at this point seem pretty
2414 * Bridge-specific can be set through these special interfaces.
2415 * We may add more flags as the need arises, or remove flags and
2416 * create PCI-generic interfaces as the need arises.
2418 * Returns 0 on failure, 1 on success
/*
 * pcibr_device_flags_set: translate PCIBR_* device flags into set/clear
 * masks for the slot's Device(x) register and apply them under the
 * bridge lock.  Each flag has a positive and a negative form mapping to
 * the same register bit (e.g. PCIBR_PREFETCH / PCIBR_NOPREFETCH).
 * Returns 0 on failure, 1 on success (per the comment above).
 * NOTE(review): local declarations (s, devreg, set, clr) and the return
 * path are outside this view.
 */
2421 pcibr_device_flags_set(vertex_hdl_t pconn_vhdl,
2422 pcibr_device_flags_t flags)
2424 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
2425 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2426 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
/* Caller must only pass flags from the PCIBR_DEVICE_FLAGS set. */
2430 ASSERT((flags & PCIBR_DEVICE_FLAGS) == flags);
2432 if (flags & PCIBR_WRITE_GATHER)
2433 set |= BRIDGE_DEV_PMU_WRGA_EN;
2434 if (flags & PCIBR_NOWRITE_GATHER)
2435 clr |= BRIDGE_DEV_PMU_WRGA_EN;
2437 if (flags & PCIBR_PREFETCH)
2438 set |= BRIDGE_DEV_PREF;
2439 if (flags & PCIBR_NOPREFETCH)
2440 clr |= BRIDGE_DEV_PREF;
2442 if (flags & PCIBR_PRECISE)
2443 set |= BRIDGE_DEV_PRECISE;
2444 if (flags & PCIBR_NOPRECISE)
2445 clr |= BRIDGE_DEV_PRECISE;
2447 if (flags & PCIBR_BARRIER)
2448 set |= BRIDGE_DEV_BARRIER;
2449 if (flags & PCIBR_NOBARRIER)
2450 clr |= BRIDGE_DEV_BARRIER;
2452 if (flags & PCIBR_64BIT)
2453 set |= BRIDGE_DEV_DEV_SIZE;
2454 if (flags & PCIBR_NO64BIT)
2455 clr |= BRIDGE_DEV_DEV_SIZE;
2457 /* PIC BRINGUP WAR (PV# 878674): Don't allow 64bit PIO accesses */
2458 if ((flags & PCIBR_64BIT) && PCIBR_WAR_ENABLED(PV878674, pcibr_soft)) {
2459 set &= ~BRIDGE_DEV_DEV_SIZE;
/* Apply masks: clear bits first, then set; rewrite the hardware
 * register only if the shadow value changes, and flush via tflush.
 */
2466 s = pcibr_lock(pcibr_soft);
2467 devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
2468 devreg = (devreg & ~clr) | set;
2469 if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
2470 pcireg_device_set(pcibr_soft, pciio_slot, devreg);
2471 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
2472 pcireg_tflush_get(pcibr_soft);
2474 pcibr_unlock(pcibr_soft, s);
2476 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
2477 "pcibr_device_flags_set: Device(%d): 0x%x\n",
2478 pciio_slot, devreg));
2484 * PIC has 16 RBARs per bus; meaning it can have a total of 16 outstanding
2485 * split transactions. If the functions on the bus have requested a total
2486 * of 16 or less, then we can give them what they requested (ie. 100%).
2487 * Otherwise we have to make sure each function can get at least one buffer
2488 * and then divide the rest of the buffers up among the functions as ``A
2489 * PERCENTAGE OF WHAT THEY REQUESTED'' (i.e. 0% - 100% of a function's
2490 * pcix_type0_status.max_out_split). This percentage does not include the
2491 * one RBAR that all functions get by default.
/*
 * pcibr_pcix_rbars_calc: compute what percentage of each function's
 * *requested* extra RBARs (beyond the 1 every function gets) can be
 * granted, given NUM_RBAR total buffers on the bus.  Returns the
 * percentage (0-100).
 * NOTE(review): the printk call opening, the 'else' arms and the
 * no-functions case are outside this view.
 */
2494 pcibr_pcix_rbars_calc(pcibr_soft_t pcibr_soft)
2496 /* 'percent_allowed' is the percentage of requested RBARs that functions
2497 * are allowed, ***less the 1 RBAR that all functions get by default***
2499 int percent_allowed;
2501 if (pcibr_soft->bs_pcix_num_funcs) {
/* More functions than RBARs: cannot even give one each. */
2502 if (pcibr_soft->bs_pcix_num_funcs > NUM_RBAR) {
2504 "%s: Must oversubscribe Read Buffer Attribute Registers"
2505 "(RBAR). Bus has %d RBARs but %d funcs need them.\n",
2506 pcibr_soft->bs_name, NUM_RBAR, pcibr_soft->bs_pcix_num_funcs);
2507 percent_allowed = 0;
/* Leftover RBARs (after one-per-function) as a percentage of the
 * total extra buffers requested across all functions.
 */
2509 percent_allowed = (((NUM_RBAR-pcibr_soft->bs_pcix_num_funcs)*100) /
2510 pcibr_soft->bs_pcix_split_tot);
2512 /* +1 to percentage to solve rounding errors that occur because
2513 * we're not doing fractional math. (ie. ((3 * 66%) / 100) = 1)
2514 * but should be "2" if doing true fractional math. NOTE: Since
2515 * the greatest number of outstanding transactions a function
2516 * can request is 32, this "+1" will always work (i.e. we won't
2517 * accidentally oversubscribe the RBARs because of this rounding
2518 * of the percentage).
2520 percent_allowed=(percent_allowed > 100) ? 100 : percent_allowed+1;
2526 return percent_allowed;
2530 * pcibr_debug() is used to print pcibr debug messages to the console. A
2531 * user enables tracing by setting the following global variables:
2533 * pcibr_debug_mask -Bitmask of what to trace. see pcibr_private.h
2534 * pcibr_debug_module -Module to trace. 'all' means trace all modules
2535 * pcibr_debug_widget -Widget to trace. '-1' means trace all widgets
2536 * pcibr_debug_slot -Slot to trace. '-1' means trace all slots
2538 * 'type' is the type of debugging that the current PCIBR_DEBUG macro is
2539 * tracing. 'vhdl' (which can be NULL) is the vhdl associated with the
2540 * debug statement. If there is a 'vhdl' associated with this debug
2541 * statement, it is parsed to obtain the module, widget, and slot. If the
2542 * globals above match the PCIBR_DEBUG params, then the debug info in the
2543 * parameter 'format' is sent to the console.
/*
 * pcibr_debug: conditionally print a pcibr trace message.  The message
 * is emitted only when 'type' is enabled in pcibr_debug_mask AND the
 * module/widget/slot parsed from the vertex's hwgraph path match the
 * pcibr_debug_module / pcibr_debug_widget / pcibr_debug_slot filters
 * (a NULL vhdl always matches).  The variadic args are formatted with
 * vsnprintf into a 1 KiB kmalloc'd buffer and printk'd.
 */
2546 pcibr_debug(uint32_t type, vertex_hdl_t vhdl, char *format, ...)
2548 char hwpath[MAXDEVNAME] = "\0";
2549 char copy_of_hwpath[MAXDEVNAME];
2551 char *module = "all";
2556 if (pcibr_debug_mask & type) {
2558 if (!hwgraph_vertex_name_get(vhdl, hwpath, MAXDEVNAME)) {
/* Module filter active: pull the module name out of a writable
 * copy of the path (strsep modifies its argument).
 */
2561 if (strcmp(module, pcibr_debug_module)) {
2563 (void)strcpy(copy_of_hwpath, hwpath);
2564 cp = strstr(copy_of_hwpath, "/" EDGE_LBL_MODULE "/");
2566 cp += strlen("/" EDGE_LBL_MODULE "/");
2567 module = strsep(&cp, "/");
/* Widget filter: parse the numeric widget id that follows the
 * xtalk edge label in the path.
 */
2570 if (pcibr_debug_widget != -1) {
2571 cp = strstr(hwpath, "/" EDGE_LBL_XTALK "/");
2573 cp += strlen("/" EDGE_LBL_XTALK "/");
2574 widget = simple_strtoul(cp, NULL, 0);
/* Slot filter: try bus 0's edge label first, then bus 1's.
 * NOTE(review): the skip uses strlen of EDGE_LBL_PCIX_0 even when
 * PCIX_1 matched — presumably both labels have equal length; confirm.
 */
2577 if (pcibr_debug_slot != -1) {
2578 cp = strstr(hwpath, "/" EDGE_LBL_PCIX_0 "/");
2580 cp = strstr(hwpath, "/" EDGE_LBL_PCIX_1 "/");
2583 cp += strlen("/" EDGE_LBL_PCIX_0 "/");
2584 slot = simple_strtoul(cp, NULL, 0);
2589 if ((vhdl == NULL) ||
2590 (!strcmp(module, pcibr_debug_module) &&
2591 (widget == pcibr_debug_widget) &&
2592 (slot == pcibr_debug_slot))) {
/* NOTE(review): no NULL check on kmalloc is visible before the
 * memset/vsnprintf below, and the matching kfree/va_end are not in
 * this view — verify they exist in the elided lines.
 */
2594 buffer = kmalloc(1024, GFP_KERNEL);
2596 printk("PCIBR_DEBUG<%d>\t: %s :", smp_processor_id(), hwpath);
2598 * KERN_MSG translates to this 3 line sequence. Since
2599 * we have a variable length argument list, we need to
2600 * call KERN_MSG this way rather than directly
2602 va_start(ap, format);
2603 memset(buffer, 0, 1024);
2604 vsnprintf(buffer, 1024, format, ap);
2606 printk("%s", buffer);
2614 * given a xconn_vhdl and a bus number under that widget, return a
/*
 * pcibr_bridge_ptr_get: translate a widget vertex plus bus number into
 * a pointer to that bus's bridge registers.  The base address comes
 * from the xtalk PIO translation for the widget; for bus 1 on a PIC
 * (two buses per widget) the fixed PIC_BUS1_OFFSET is added.
 */
2618 pcibr_bridge_ptr_get(vertex_hdl_t widget_vhdl, int bus_num)
2622 bridge = (void *)xtalk_piotrans_addr(widget_vhdl, 0, 0,
2625 /* PIC ASIC has two bridges (ie. two buses) under a single widget */
2627 bridge = (void *)((char *)bridge + PIC_BUS1_OFFSET);
2634 isIO9(nasid_t nasid)
2636 lboard_t *brd = (lboard_t *)KL_CONFIG_INFO(nasid);
2639 if (brd->brd_flags & LOCAL_MASTER_IO6) {
2642 if (numionodes == numnodes)
2643 brd = KLCF_NEXT_ANY(brd);
2645 brd = KLCF_NEXT(brd);
2647 /* if it's dual ported, check the peer also */
2648 nasid = NODEPDA(nasid_to_cnodeid(nasid))->xbow_peer;
2649 if (nasid < 0) return 0;
2650 brd = (lboard_t *)KL_CONFIG_INFO(nasid);
2652 if (brd->brd_flags & LOCAL_MASTER_IO6) {
2655 if (numionodes == numnodes)
2656 brd = KLCF_NEXT_ANY(brd);
2658 brd = KLCF_NEXT(brd);