/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn_helgaas@hp.com>
 *
 * Note: Above list of copyright holders is incomplete...
 */
#include <linux/config.h>

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>

#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#define DBG(x...)	printk(x)
/*
 * Low-level SAL-based PCI configuration access functions.  Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */
#define PCI_SAL_ADDRESS(seg, bus, devfn, reg) \
	((u64)(seg << 24) | (u64)(bus << 16) | \
	 (u64)(devfn << 8) | (u64)(reg))
/* SAL 3.2 adds support for extended config space. */
#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg) \
	((u64)(seg << 28) | (u64)(bus << 20) | \
	 (u64)(devfn << 12) | (u64)(reg))
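
/*
 * Worked example (illustration only, not from the original source): for
 * seg 1, bus 2, devfn 0x10, reg 0x40, the compatible format packs to
 *
 *	(1 << 24) | (2 << 16) | (0x10 << 8) | 0x40 = 0x01021040
 *
 * while the SAL 3.2 extended format packs the same tuple as
 *
 *	(1 << 28) | (2 << 20) | (0x10 << 12) | 0x40 = 0x10210040
 *
 * leaving 12 bits for reg, enough for extended config space offsets.
 */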
static int
pci_sal_read (int seg, int bus, int devfn, int reg, int len, u32 *value)
{
	u64 addr, mode, data = 0;
	int result;

	/* Extended config space allows a 16-bit segment; match the write path below. */
	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	}
	result = ia64_sal_pci_config_read(addr, mode, len, &data);

	*value = (u32) data;
	return result;
}
static int
pci_sal_write (int seg, int bus, int devfn, int reg, int len, u32 value)
{
	u64 addr, mode;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	}
	return ia64_sal_pci_config_write(addr, mode, len, value);
}
static struct pci_raw_ops pci_sal_ops = {
	.read =		pci_sal_read,
	.write =	pci_sal_write
};

struct pci_raw_ops *raw_pci_ops = &pci_sal_ops;	/* default to SAL */
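
/*
 * Usage sketch (hypothetical caller, for illustration): reading the
 * 16-bit vendor ID of device 0000:00:00.0 through the raw ops:
 *
 *	u32 id;
 *	raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, 2, &id);
 *
 * Platform code that has a chipset-specific config mechanism can
 * repoint raw_pci_ops at its own pci_raw_ops.
 */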
static int
pci_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	return raw_pci_ops->read(pci_domain_nr(bus), bus->number,
				 devfn, where, size, value);
}

static int
pci_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
	return raw_pci_ops->write(pci_domain_nr(bus), bus->number,
				  devfn, where, size, value);
}

static struct pci_ops pci_root_ops = {
	.read =		pci_read,
	.write =	pci_write,
};
static int __init
pci_acpi_init (void)
{
	struct pci_dev *dev = NULL;

	printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");

#ifdef CONFIG_NUMA
	extern acpi_status acpi_map_iosapic (acpi_handle, u32, void *, void **);

	acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
#endif

	/*
	 * PCI IRQ routing is set up by pci_enable_device(), but we
	 * also do it here in case there are still broken drivers that
	 * don't use pci_enable_device().
	 */
	while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
		acpi_pci_irq_enable(dev);

	return 0;
}

subsys_initcall(pci_acpi_init);
/* Called by ACPI when it finds a new root bus.  */

static struct pci_controller * __devinit
alloc_pci_controller (int seg)
{
	struct pci_controller *controller;

	controller = kmalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return NULL;

	memset(controller, 0, sizeof(*controller));
	controller->segment = seg;
	return controller;
}
static int __devinit
alloc_resource (char *name, struct resource *root, unsigned long start, unsigned long end,
		unsigned long flags)
{
	struct resource *res;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(res, 0, sizeof(*res));
	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = flags;

	if (insert_resource(root, res)) {
		kfree(res);
		return -EBUSY;
	}

	return 0;
}
static u64 __devinit
add_io_space (struct acpi_resource_address64 *addr)
{
	u64 offset;
	int sparse = 0;
	int i;

	if (addr->address_translation_offset == 0)
		return IO_SPACE_BASE(0);	/* part of legacy IO space */

	if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION)
		sparse = 1;

	offset = (u64) ioremap(addr->address_translation_offset, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == offset &&
		    io_space[i].sparse == sparse)
			return IO_SPACE_BASE(i);

	if (num_io_spaces == MAX_IO_SPACES) {
		printk(KERN_ERR "PCI: Too many IO port spaces\n");
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = offset;
	io_space[i].sparse = sparse;

	return IO_SPACE_BASE(i);
}
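
/*
 * Note (sketch of the encoding, assuming the ia64 io_space conventions
 * in <asm/io.h>): the value returned above acts as a port-space cookie.
 * IO_SPACE_BASE(i) places the space number in the high bits of a port
 * number, so the inb()/outb() machinery can recover io_space[i] (its
 * mmio_base and sparse attribute) from any port in that space.
 */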
static acpi_status __devinit
count_window (struct acpi_resource *resource, void *data)
{
	unsigned int *windows = (unsigned int *) data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = acpi_resource_to_address64(resource, &addr);
	if (ACPI_SUCCESS(status))
		if (addr.resource_type == ACPI_MEMORY_RANGE ||
		    addr.resource_type == ACPI_IO_RANGE)
			(*windows)++;

	return AE_OK;
}
struct pci_root_info {
	struct pci_controller *controller;
	char *name;
};
static acpi_status __devinit
add_window (struct acpi_resource *res, void *data)
{
	struct pci_root_info *info = (struct pci_root_info *) data;
	struct pci_window *window;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags, offset = 0;
	struct resource *root;

	status = acpi_resource_to_address64(res, &addr);
	if (ACPI_SUCCESS(status)) {
		if (!addr.address_length)
			return AE_OK;

		if (addr.resource_type == ACPI_MEMORY_RANGE) {
			flags = IORESOURCE_MEM;
			root = &iomem_resource;
			offset = addr.address_translation_offset;
		} else if (addr.resource_type == ACPI_IO_RANGE) {
			flags = IORESOURCE_IO;
			root = &ioport_resource;
			offset = add_io_space(&addr);
			if (offset == ~0)
				return AE_OK;
		} else
			return AE_OK;

		window = &info->controller->window[info->controller->windows++];
		/* window comes from a plain kmalloc(), so assign rather than OR in the flags */
		window->resource.flags = flags;
		window->resource.start = addr.min_address_range;
		window->resource.end = addr.max_address_range;
		window->offset = offset;

		if (alloc_resource(info->name, root, addr.min_address_range + offset,
				   addr.max_address_range + offset, flags))
			printk(KERN_ERR "alloc 0x%lx-0x%lx from %s for %s failed\n",
			       addr.min_address_range + offset,
			       addr.max_address_range + offset,
			       root->name, info->name);
	}

	return AE_OK;
}
struct pci_bus * __devinit
pci_acpi_scan_root (struct acpi_device *device, int domain, int bus)
{
	struct pci_root_info info;
	struct pci_controller *controller;
	unsigned int windows = 0;
	char *name;

	controller = alloc_pci_controller(domain);
	if (!controller)
		goto out1;

	controller->acpi_handle = device->handle;

	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window, &windows);
	controller->window = kmalloc(sizeof(*controller->window) * windows, GFP_KERNEL);
	if (!controller->window)
		goto out2;

	name = kmalloc(16, GFP_KERNEL);
	if (!name)
		goto out3;

	sprintf(name, "PCI Bus %04x:%02x", domain, bus);
	info.controller = controller;
	info.name = name;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, add_window, &info);

	return pci_scan_bus(bus, &pci_root_ops, controller);

out3:
	kfree(controller->window);
out2:
	kfree(controller);
out1:
	return NULL;
}
static void __devinit
pcibios_fixup_device_resources (struct pci_dev *dev, struct pci_bus *bus)
{
	struct pci_controller *controller = PCI_CONTROLLER(dev);
	struct pci_window *window;
	int i, j;
	int limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ?
		PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES;

	for (i = 0; i < limit; i++) {
		if (!dev->resource[i].start)
			continue;

#define contains(win, res)	((res)->start >= (win)->start && \
				 (res)->end   <= (win)->end)

		for (j = 0; j < controller->windows; j++) {
			window = &controller->window[j];
			if (((dev->resource[i].flags & IORESOURCE_MEM &&
			      window->resource.flags & IORESOURCE_MEM) ||
			     (dev->resource[i].flags & IORESOURCE_IO &&
			      window->resource.flags & IORESOURCE_IO)) &&
			    contains(&window->resource, &dev->resource[i])) {
				dev->resource[i].start += window->offset;
				dev->resource[i].end   += window->offset;
			}
		}
		pci_claim_resource(dev, i);
	}
}
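
/*
 * Example (numbers invented for illustration): if a root bridge window
 * spans bus addresses 0x0-0xffffff with offset 0x80000000, a BAR that
 * firmware assigned the bus address 0x100000 is rewritten above to the
 * CPU-relative address 0x80100000 before being claimed.
 */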
/*
 *  Called after each bus is probed, but before its children are examined.
 */
void __devinit
pcibios_fixup_bus (struct pci_bus *b)
{
	struct list_head *ln;

	for (ln = b->devices.next; ln != &b->devices; ln = ln->next)
		pcibios_fixup_device_resources(pci_dev_b(ln), b);
}
void __devinit
pcibios_update_irq (struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);

	/* ??? FIXME -- record old value for shutdown.  */
}
static int __devinit
pcibios_enable_resources (struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	if (!dev)
		return -EINVAL;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx = 0; idx < 6; idx++) {
		/* Only set up the desired resources.  */
		if (!(mask & (1 << idx)))
			continue;

		r = &dev->resource[idx];
		if (!r->start && r->end) {
			printk(KERN_ERR
			       "PCI: Device %s not available because of resource collisions\n",
			       pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (dev->resource[PCI_ROM_RESOURCE].start)
		cmd |= PCI_COMMAND_MEMORY;
	if (cmd != old_cmd) {
		printk(KERN_INFO "PCI: Enabling device %s (%04x -> %04x)\n",
		       pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}
int
pcibios_enable_device (struct pci_dev *dev, int mask)
{
	int ret;

	ret = pcibios_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	return acpi_pci_irq_enable(dev);
}
void
pcibios_align_resource (void *data, struct resource *res,
			unsigned long size, unsigned long align)
{
}
/*
 * PCI BIOS setup, always defaults to SAL interface
 */
char * __init
pcibios_setup (char *str)
{
	return NULL;
}
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state, int write_combine)
{
	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		/*
		 * XXX we could relax this for I/O spaces for which ACPI
		 * indicates that the space is 1-to-1 mapped.  But at the
		 * moment, we don't support multiple PCI address spaces and
		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
		 */
		return -EINVAL;

	/*
	 * Leave vm_pgoff as-is, the PCI space address is the physical
	 * address on this platform.
	 */
	vma->vm_flags |= (VM_SHM | VM_LOCKED | VM_IO);

	if (write_combine)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_page_range(vma, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
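
/*
 * Usage sketch (hypothetical, for illustration): a user-level driver
 * mmap()ing PCI memory space via /proc/bus/pci ends up here; the file
 * offset it passes becomes vm_pgoff, which on this platform is already
 * the physical PCI memory address shifted by PAGE_SHIFT.
 */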
/**
 * pci_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 *
 * RETURNS: The cacheline size in bytes (falls back to SMP_CACHE_BYTES
 * if PAL cannot be queried).
 */
static unsigned long
pci_cacheline_size (void)
{
	u64 levels, unique_caches;
	s64 status;
	pal_cache_config_info_t cci;
	static u8 cacheline_size;

	if (cacheline_size)
		return cacheline_size;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		return SMP_CACHE_BYTES;
	}

	status = ia64_pal_cache_config_info(levels - 1,
					    /* cache_type (data_or_unified) = */ 2, &cci);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed (status=%ld)\n",
		       __FUNCTION__, status);
		return SMP_CACHE_BYTES;
	}

	cacheline_size = 1 << cci.pcci_line_size;
	return cacheline_size;
}
/**
 * pcibios_prep_mwi - helper function for drivers/pci/pci.c:pci_set_mwi()
 * @dev: the PCI device for which MWI is enabled
 *
 * For ia64, we can get the cacheline sizes from PAL.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pcibios_prep_mwi (struct pci_dev *dev)
{
	unsigned long desired_linesize, current_linesize;
	int rc = 0;
	u8 pci_linesize;

	desired_linesize = pci_cacheline_size();

	/* PCI_CACHE_LINE_SIZE is expressed in units of 32-bit dwords. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &pci_linesize);
	current_linesize = 4 * pci_linesize;
	if (desired_linesize != current_linesize) {
		printk(KERN_WARNING "PCI: slot %s has incorrect PCI cache line size of %lu bytes,",
		       pci_name(dev), current_linesize);
		if (current_linesize > desired_linesize) {
			printk(" expected %lu bytes instead\n", desired_linesize);
			rc = -EINVAL;
		} else {
			printk(" correcting to %lu\n", desired_linesize);
			pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, desired_linesize / 4);
		}
	}
	return rc;
}
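
/*
 * Worked example (illustration only): with a 128-byte outermost cache
 * line, PAL reports pcci_line_size = 7, so pci_cacheline_size() returns
 * 1 << 7 = 128; the value written to PCI_CACHE_LINE_SIZE is then
 * 128 / 4 = 32, since that register counts 32-bit dwords, not bytes.
 */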
int pci_vector_resources(int last, int nr_released)
{
	int count = nr_released;

	count += (IA64_LAST_DEVICE_VECTOR - last);

	return count;
}