/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * Copyright (C) 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn_helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */

#include <linux/config.h>

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>

#include <asm/machvec.h>
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/hw_irq.h>

#define DBG(x...) printk(x)

static int pci_routeirq;

/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
	((u64)(seg << 24) | (u64)(bus << 16) |		\
	 (u64)(devfn << 8) | (u64)(reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	((u64)(seg << 28) | (u64)(bus << 20) |		\
	 (u64)(devfn << 12) | (u64)(reg))

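/*
 * Worked example (illustrative, not from the original source): with the
 * SAL 3.2 extended format, seg 1, bus 0x20, devfn 0x08, reg 0x100 encodes
 * as (1 << 28) | (0x20 << 20) | (0x08 << 12) | 0x100 == 0x12008100.  The
 * legacy format only has 8 bits each for the segment and the register,
 * which is why the accessors below use it only when (seg | reg) <= 255.
 */
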
static int
pci_sal_read (int seg, int bus, int devfn, int reg, int len, u32 *value)
{
	u64 addr, mode, data = 0;
	int result;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	}
	result = ia64_sal_pci_config_read(addr, mode, len, &data);

	*value = (u32) data;
	return result;
}

static int
pci_sal_write (int seg, int bus, int devfn, int reg, int len, u32 value)
{
	u64 addr, mode;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	}
	return ia64_sal_pci_config_write(addr, mode, len, value);
}

static struct pci_raw_ops pci_sal_ops = {
	.read =		pci_sal_read,
	.write =	pci_sal_write
};

struct pci_raw_ops *raw_pci_ops = &pci_sal_ops;

static int
pci_read (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	return raw_pci_ops->read(pci_domain_nr(bus), bus->number,
				 devfn, where, size, value);
}

static int
pci_write (struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
	return raw_pci_ops->write(pci_domain_nr(bus), bus->number,
				  devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

#ifdef CONFIG_NUMA
extern acpi_status acpi_map_iosapic(acpi_handle, u32, void *, void **);

static void acpi_map_iosapics(void)
{
	acpi_get_devices(NULL, acpi_map_iosapic, NULL, NULL);
}
#else
static void acpi_map_iosapics(void)
{
}
#endif /* CONFIG_NUMA */

static int __init
pci_acpi_init (void)
{
	struct pci_dev *dev = NULL;

	printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");

	acpi_map_iosapics();

	if (pci_routeirq) {
		/*
		 * PCI IRQ routing is set up by pci_enable_device(), but we
		 * also do it here in case there are still broken drivers that
		 * don't use pci_enable_device().
		 */
		printk(KERN_INFO "** Routing PCI interrupts for all devices because \"pci=routeirq\"\n");
		printk(KERN_INFO "** was specified. If this was required to make a driver work,\n");
		printk(KERN_INFO "** please email the output of \"lspci\" to bjorn.helgaas@hp.com\n");
		printk(KERN_INFO "** so I can fix the driver.\n");
		for_each_pci_dev(dev)
			acpi_pci_irq_enable(dev);
	} else {
		printk(KERN_INFO "** PCI interrupts are no longer routed automatically. If this\n");
		printk(KERN_INFO "** causes a device to stop working, it is probably because the\n");
		printk(KERN_INFO "** driver failed to call pci_enable_device(). As a temporary\n");
		printk(KERN_INFO "** workaround, the \"pci=routeirq\" argument restores the old\n");
		printk(KERN_INFO "** behavior. If this argument makes the device work again,\n");
		printk(KERN_INFO "** please email the output of \"lspci\" to bjorn.helgaas@hp.com\n");
		printk(KERN_INFO "** so I can fix the driver.\n");
	}

	return 0;
}

subsys_initcall(pci_acpi_init);

/* Called by ACPI when it finds a new root bus. */

static struct pci_controller * __devinit
alloc_pci_controller (int seg)
{
	struct pci_controller *controller;

	controller = kmalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return NULL;

	memset(controller, 0, sizeof(*controller));
	controller->segment = seg;
	return controller;
}

static int __devinit
alloc_resource (char *name, struct resource *root, unsigned long start, unsigned long end,
	unsigned long flags)
{
	struct resource *res;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	memset(res, 0, sizeof(*res));
	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = flags;

	if (insert_resource(root, res)) {
		kfree(res);
		return -EBUSY;
	}

	return 0;
}

static u64 __devinit
add_io_space (struct acpi_resource_address64 *addr)
{
	u64 offset;
	int sparse = 0;
	int i;

	if (addr->address_translation_offset == 0)
		return IO_SPACE_BASE(0);	/* part of legacy IO space */

	if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION)
		sparse = 1;

	offset = (u64) ioremap(addr->address_translation_offset, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == offset &&
		    io_space[i].sparse == sparse)
			return IO_SPACE_BASE(i);

	if (num_io_spaces == MAX_IO_SPACES) {
		printk("Too many IO port spaces\n");
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = offset;
	io_space[i].sparse = sparse;

	return IO_SPACE_BASE(i);
}

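/*
 * Background (generic ia64 behaviour, not stated in this file): an I/O port
 * number on ia64 is a cookie, not a separate address space.  Each io_space[]
 * entry records the uncached MMIO base through which port accesses to that
 * space are translated, and IO_SPACE_BASE(i) places the space index in the
 * upper bits of the cookie.  A "sparse" space spreads the port number across
 * a wider MMIO range than a densely (1-to-1) translated one.
 */
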
static acpi_status __devinit
count_window (struct acpi_resource *resource, void *data)
{
	unsigned int *windows = (unsigned int *) data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = acpi_resource_to_address64(resource, &addr);
	if (ACPI_SUCCESS(status))
		if (addr.resource_type == ACPI_MEMORY_RANGE ||
		    addr.resource_type == ACPI_IO_RANGE)
			(*windows)++;

	return AE_OK;
}

struct pci_root_info {
	struct pci_controller *controller;
	char *name;
};

static acpi_status __devinit
add_window (struct acpi_resource *res, void *data)
{
	struct pci_root_info *info = (struct pci_root_info *) data;
	struct pci_window *window;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags, offset = 0;
	struct resource *root;

	status = acpi_resource_to_address64(res, &addr);
	if (ACPI_SUCCESS(status)) {
		if (!addr.address_length)
			return AE_OK;

		if (addr.resource_type == ACPI_MEMORY_RANGE) {
			flags = IORESOURCE_MEM;
			root = &iomem_resource;
			offset = addr.address_translation_offset;
		} else if (addr.resource_type == ACPI_IO_RANGE) {
			flags = IORESOURCE_IO;
			root = &ioport_resource;
			offset = add_io_space(&addr);
		} else
			return AE_OK;

		window = &info->controller->window[info->controller->windows++];
		window->resource.flags = flags;
		window->resource.start = addr.min_address_range;
		window->resource.end = addr.max_address_range;
		window->offset = offset;

		if (alloc_resource(info->name, root, addr.min_address_range + offset,
			addr.max_address_range + offset, flags))
			printk(KERN_ERR "alloc 0x%lx-0x%lx from %s for %s failed\n",
				addr.min_address_range + offset, addr.max_address_range + offset,
				root->name, info->name);
	}

	return AE_OK;
}

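/*
 * Note on window->offset (follows from the code above and the translation
 * helpers below): it is the amount added to a PCI bus address in this window
 * to obtain the CPU physical address, so CPU-relative resources and
 * bus-relative BAR values are converted by adding or subtracting it.
 */
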
struct pci_bus * __devinit
pci_acpi_scan_root (struct acpi_device *device, int domain, int bus)
{
	struct pci_root_info info;
	struct pci_controller *controller;
	unsigned int windows = 0;
	char *name;

	controller = alloc_pci_controller(domain);
	if (!controller)
		goto out1;

	controller->acpi_handle = device->handle;

	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window, &windows);
	controller->window = kmalloc(sizeof(*controller->window) * windows, GFP_KERNEL);
	if (!controller->window)
		goto out2;

	name = kmalloc(16, GFP_KERNEL);
	if (!name)
		goto out3;

	sprintf(name, "PCI Bus %04x:%02x", domain, bus);
	info.controller = controller;
	info.name = name;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, add_window, &info);

	return pci_scan_bus(bus, &pci_root_ops, controller);

out3:
	kfree(controller->window);
out2:
	kfree(controller);
out1:
	return NULL;
}

void pcibios_resource_to_bus(struct pci_dev *dev,
		struct pci_bus_region *region, struct resource *res)
{
	struct pci_controller *controller = PCI_CONTROLLER(dev);
	unsigned long offset = 0;
	int i;

	for (i = 0; i < controller->windows; i++) {
		struct pci_window *window = &controller->window[i];
		if (!(window->resource.flags & res->flags))
			continue;
		if (window->resource.start > res->start - window->offset)
			continue;
		if (window->resource.end < res->end - window->offset)
			continue;
		offset = window->offset;
		break;
	}

	region->start = res->start - offset;
	region->end = res->end - offset;
}
EXPORT_SYMBOL(pcibios_resource_to_bus);

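/*
 * Worked example (illustrative numbers, not from the original source): for
 * a window whose offset is 0x80000000, a device resource at CPU address
 * 0x80001000 converts to bus (BAR) address 0x1000 here;
 * pcibios_bus_to_resource() below performs the inverse conversion.
 */
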
void pcibios_bus_to_resource(struct pci_dev *dev,
		struct resource *res, struct pci_bus_region *region)
{
	struct pci_controller *controller = PCI_CONTROLLER(dev);
	unsigned long offset = 0;
	int i;

	for (i = 0; i < controller->windows; i++) {
		struct pci_window *window = &controller->window[i];
		if (!(window->resource.flags & res->flags))
			continue;
		if (window->resource.start > region->start)
			continue;
		if (window->resource.end < region->end)
			continue;
		offset = window->offset;
		break;
	}

	res->start = region->start + offset;
	res->end = region->end + offset;
}

static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
{
	struct pci_bus_region region;
	int i;
	int limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ?
		PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES;

	for (i = 0; i < limit; i++) {
		if (!dev->resource[i].flags)
			continue;
		region.start = dev->resource[i].start;
		region.end = dev->resource[i].end;
		pcibios_bus_to_resource(dev, &dev->resource[i], &region);
		pci_claim_resource(dev, i);
	}
}

/*
 * Called after each bus is probed, but before its children are examined.
 */
void __devinit
pcibios_fixup_bus (struct pci_bus *b)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
}

void __devinit
pcibios_update_irq (struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);

	/* ??? FIXME -- record old value for shutdown. */
}

static int __devinit
pcibios_enable_resources (struct pci_dev *dev, int mask)
{
	u16 cmd, old_cmd;
	int idx;
	struct resource *r;

	if (!dev)
		return -EINVAL;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;
	for (idx=0; idx<6; idx++) {
		/* Only set up the desired resources. */
		if (!(mask & (1 << idx)))
			continue;

		r = &dev->resource[idx];
		if (!r->start && r->end) {
			printk(KERN_ERR
				"PCI: Device %s not available because of resource collisions\n",
				pci_name(dev));
			return -EINVAL;
		}
		if (r->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	if (dev->resource[PCI_ROM_RESOURCE].start)
		cmd |= PCI_COMMAND_MEMORY;
	if (cmd != old_cmd) {
		printk("PCI: Enabling device %s (%04x -> %04x)\n", pci_name(dev), old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

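/*
 * Reminder (PCI spec behaviour, not specific to this file): PCI_COMMAND_IO
 * and PCI_COMMAND_MEMORY gate whether the device decodes its I/O and memory
 * BARs at all, so a BAR that is assigned but never enabled here remains
 * inaccessible.  The separate check on the expansion ROM resource enables
 * memory decoding even when the ROM BAR was not part of @mask.
 */
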
int
pcibios_enable_device (struct pci_dev *dev, int mask)
{
	int ret;

	ret = pcibios_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	return acpi_pci_irq_enable(dev);
}

#ifdef CONFIG_ACPI_DEALLOCATE_IRQ
void
pcibios_disable_device (struct pci_dev *dev)
{
	acpi_pci_irq_disable(dev);
}
#endif /* CONFIG_ACPI_DEALLOCATE_IRQ */

void
pcibios_align_resource (void *data, struct resource *res,
			unsigned long size, unsigned long align)
{
}

/*
 * PCI BIOS setup, always defaults to SAL interface
 */
char * __init
pcibios_setup (char *str)
{
	if (!strcmp(str, "routeirq"))
		pci_routeirq = 1;
	return NULL;
}

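/*
 * Usage note: booting with "pci=routeirq" on the kernel command line lands
 * here, sets pci_routeirq, and makes pci_acpi_init() above route interrupts
 * for every PCI device up front instead of waiting for pci_enable_device().
 */
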
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state, int write_combine)
{
	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		/*
		 * XXX we could relax this for I/O spaces for which ACPI
		 * indicates that the space is 1-to-1 mapped. But at the
		 * moment, we don't support multiple PCI address spaces and
		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
		 */
		return -EINVAL;

	/*
	 * Leave vm_pgoff as-is, the PCI space address is the physical
	 * address on this platform.
	 */
	vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);

	if (write_combine && efi_range_is_wc(vma->vm_start,
					     vma->vm_end - vma->vm_start))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

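/*
 * Why the attribute check matters (hedged summary, not from the original
 * comments): ia64 can take an MCA when the same physical range is accessed
 * with conflicting memory attributes, so the user mapping is only made
 * write-combining when the EFI memory map says the whole range supports WC;
 * otherwise it falls back to uncacheable.
 */
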
/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus. This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing. Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine. Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
	return (char *)__IA64_UNCACHED_OFFSET;
}

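/*
 * Note (hedged): __IA64_UNCACHED_OFFSET is the base of ia64's uncached
 * identity mapping of physical address 0, so the generic implementation
 * treats low physical memory as the legacy area; the mmap helper below
 * simply folds this base address into vm_pgoff.
 */
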
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
{
	char *addr;

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform. This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)

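/*
 * A minimal sketch of such a read, offered as an assumption rather than the
 * original implementation: dispatch on @size and use the ordinary port
 * accessors, e.g.
 *
 *	switch (size) {
 *	case 1: *val = inb(port); break;
 *	case 2: *val = inw(port); break;
 *	case 4: *val = inl(port); break;
 *	default: return -EINVAL;
 *	}
 *	return size;
 */
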
/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_dev *bus, u16 port, u32 val, u8 size)

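/*
 * The write side would mirror the read sketch above: under the same
 * assumption, dispatch on @size to outb()/outw()/outl() and reject other
 * sizes with -EINVAL.
 */
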
/**
 * pci_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache. We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 *
 * RETURNS: the cacheline size in bytes, or SMP_CACHE_BYTES if PAL could
 * not be queried.
 */
static unsigned long
pci_cacheline_size (void)
{
	u64 levels, unique_caches;
	s64 status;
	pal_cache_config_info_t cci;
	static u8 cacheline_size;

	if (cacheline_size)
		return cacheline_size;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		return SMP_CACHE_BYTES;
	}

	status = ia64_pal_cache_config_info(levels - 1, /* cache_type (data_or_unified)= */ 2,
					    &cci);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed (status=%ld)\n",
		       __FUNCTION__, status);
		return SMP_CACHE_BYTES;
	}

	cacheline_size = 1 << cci.pcci_line_size;
	return cacheline_size;
}

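/*
 * Example (illustrative): PAL reports pcci_line_size as log2 of the line
 * size in bytes, so a value of 7 means 1 << 7 == 128-byte lines (typical
 * of the Itanium 2 L3 cache).
 */
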
/**
 * pcibios_prep_mwi - helper function for drivers/pci/pci.c:pci_set_mwi()
 * @dev: the PCI device for which MWI is enabled
 *
 * For ia64, we can get the cacheline sizes from PAL.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pcibios_prep_mwi (struct pci_dev *dev)
{
	unsigned long desired_linesize, current_linesize;
	int rc = 0;
	u8 pci_linesize;

	desired_linesize = pci_cacheline_size();

	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &pci_linesize);
	current_linesize = 4 * pci_linesize;
	if (desired_linesize != current_linesize) {
		printk(KERN_WARNING "PCI: slot %s has incorrect PCI cache line size of %lu bytes,",
		       pci_name(dev), current_linesize);
		if (current_linesize > desired_linesize) {
			printk(" expected %lu bytes instead\n", desired_linesize);
			rc = -EINVAL;
		} else {
			printk(" correcting to %lu\n", desired_linesize);
			pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, desired_linesize / 4);
		}
	}
	return rc;
}

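/*
 * Reminder (PCI spec, not specific to this file): the PCI_CACHE_LINE_SIZE
 * config register counts 32-bit dwords, hence the "* 4" and "/ 4" above;
 * a 128-byte cacheline corresponds to a register value of 32 (0x20).
 */
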
int pci_vector_resources(int last, int nr_released)
{
	int count = nr_released;

	count += (IA64_LAST_DEVICE_VECTOR - last);

	return count;
}