X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=drivers%2Fpci%2Fpci.c;h=518ee460f9b1f5fb4ba1e9526f82d24c60f1f295;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=b17ba2d0e511d6239ac28d0e4dc7e4f4b41603b2;hpb=6a77f38946aaee1cd85eeec6cf4229b204c15071;p=linux-2.6.git

diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b17ba2d0e..518ee460f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -9,20 +9,17 @@
  * Copyright 1997 -- 2000 Martin Mares
  */
 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include 
+#include 
 #include 	/* isa_dma_bridge_buggy */
+#include "pci.h"
 
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
+unsigned int pci_pm_d3_delay = 10;
 
 /**
  * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
@@ -37,7 +34,7 @@ pci_bus_max_busnr(struct pci_bus* bus)
 	struct list_head *tmp;
 	unsigned char max, n;
 
-	max = bus->number;
+	max = bus->subordinate;
 	list_for_each(tmp, &bus->children) {
 		n = pci_bus_max_busnr(pci_bus_b(tmp));
 		if(n > max)
@@ -45,7 +42,9 @@ pci_bus_max_busnr(struct pci_bus* bus)
 	}
 	return max;
 }
+EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
 
+#if 0
 /**
  * pci_max_busnr - returns maximum PCI bus number
  *
@@ -67,11 +66,50 @@ pci_max_busnr(void)
 	return max;
 }
 
-static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_type, int cap)
+#endif /* 0 */
+
+#define PCI_FIND_CAP_TTL 48
+
+static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
+				   u8 pos, int cap, int *ttl)
+{
+	u8 id;
+
+	while ((*ttl)--) {
+		pci_bus_read_config_byte(bus, devfn, pos, &pos);
+		if (pos < 0x40)
+			break;
+		pos &= ~3;
+		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
+					 &id);
+		if (id == 0xff)
+			break;
+		if (id == cap)
+			return pos;
+		pos += PCI_CAP_LIST_NEXT;
+	}
+	return 0;
+}
+
+static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
+			       u8 pos, int cap)
+{
+	int ttl = PCI_FIND_CAP_TTL;
+
+	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
+}
+
+int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
+{
+	return __pci_find_next_cap(dev->bus, dev->devfn,
+				   pos + PCI_CAP_LIST_NEXT, cap);
+}
+EXPORT_SYMBOL_GPL(pci_find_next_capability);
+
+static int __pci_bus_find_cap_start(struct pci_bus *bus,
+				    unsigned int devfn, u8 hdr_type)
 {
 	u16 status;
-	u8 pos, id;
-	int ttl = 48;
 
 	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
 	if (!(status & PCI_STATUS_CAP_LIST))
@@ -80,23 +118,13 @@ static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_ty
 	switch (hdr_type) {
 	case PCI_HEADER_TYPE_NORMAL:
 	case PCI_HEADER_TYPE_BRIDGE:
-		pci_bus_read_config_byte(bus, devfn, PCI_CAPABILITY_LIST, &pos);
-		break;
+		return PCI_CAPABILITY_LIST;
 	case PCI_HEADER_TYPE_CARDBUS:
-		pci_bus_read_config_byte(bus, devfn, PCI_CB_CAPABILITY_LIST, &pos);
-		break;
+		return PCI_CB_CAPABILITY_LIST;
 	default:
 		return 0;
 	}
-	while (ttl-- && pos >= 0x40) {
-		pos &= ~3;
-		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID, &id);
-		if (id == 0xff)
-			break;
-		if (id == cap)
-			return pos;
-		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_NEXT, &pos);
-	}
+
 	return 0;
 }
 
@@ -121,7 +149,13 @@ static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_ty
  */
 int pci_find_capability(struct pci_dev *dev, int cap)
 {
-	return __pci_bus_find_cap(dev->bus, dev->devfn, dev->hdr_type, cap);
+	int pos;
+
+	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
+	if (pos)
+		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
+
+	return pos;
 }
 
 /**
@@ -139,11 +173,16 @@ int pci_find_capability(struct pci_dev *dev, int cap)
  */
 int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
 {
+	int pos;
 	u8 hdr_type;
 
 	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
 
-	return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap);
+	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
+	if (pos)
+		pos = __pci_find_next_cap(bus, devfn, pos, cap);
+
+	return pos;
 }
 
 /**
@@ -193,6 +232,76 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(pci_find_ext_capability);
+
+static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
+{
+	int rc, ttl = PCI_FIND_CAP_TTL;
+	u8 cap, mask;
+
+	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
+		mask = HT_3BIT_CAP_MASK;
+	else
+		mask = HT_5BIT_CAP_MASK;
+
+	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
+				      PCI_CAP_ID_HT, &ttl);
+	while (pos) {
+		rc = pci_read_config_byte(dev, pos + 3, &cap);
+		if (rc != PCIBIOS_SUCCESSFUL)
+			return 0;
+
+		if ((cap & mask) == ht_cap)
+			return pos;
+
+		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
+					      pos + PCI_CAP_LIST_NEXT,
+					      PCI_CAP_ID_HT, &ttl);
+	}
+
+	return 0;
+}
+/**
+ * pci_find_next_ht_capability - query a device's Hypertransport capabilities
+ * @dev: PCI device to query
+ * @pos: Position from which to continue searching
+ * @ht_cap: Hypertransport capability code
+ *
+ * To be used in conjunction with pci_find_ht_capability() to search for
+ * all capabilities matching @ht_cap. @pos should always be a value returned
+ * from pci_find_ht_capability().
+ *
+ * NB. To be 100% safe against broken PCI devices, the caller should take
+ * steps to avoid an infinite loop.
+ */
+int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
+{
+	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
+}
+EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
+
+/**
+ * pci_find_ht_capability - query a device's Hypertransport capabilities
+ * @dev: PCI device to query
+ * @ht_cap: Hypertransport capability code
+ *
+ * Tell if a device supports a given Hypertransport capability.
+ * Returns an address within the device's PCI configuration space
+ * or 0 in case the device does not support the request capability.
+ * The address points to the PCI capability, of type PCI_CAP_ID_HT,
+ * which has a Hypertransport capability matching @ht_cap.
+ */
+int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
+{
+	int pos;
+
+	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
+	if (pos)
+		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
+
+	return pos;
+}
+EXPORT_SYMBOL_GPL(pci_find_ht_capability);
 
 /**
  * pci_find_parent_resource - return resource region of parent bus of given region
@@ -226,6 +335,39 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
 	return best;
 }
 
+/**
+ * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
+ * @dev: PCI device to have its BARs restored
+ *
+ * Restore the BAR values for a given device, so as to make it
+ * accessible by its driver.
+ */
+void
+pci_restore_bars(struct pci_dev *dev)
+{
+	int i, numres;
+
+	switch (dev->hdr_type) {
+	case PCI_HEADER_TYPE_NORMAL:
+		numres = 6;
+		break;
+	case PCI_HEADER_TYPE_BRIDGE:
+		numres = 2;
+		break;
+	case PCI_HEADER_TYPE_CARDBUS:
+		numres = 1;
+		break;
+	default:
+		/* Should never get here, but just in case... */
+		return;
+	}
+
+	for (i = 0; i < numres; i ++)
+		pci_update_resource(dev, &dev->resource[i], i);
+}
+
+int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t);
+
 /**
  * pci_set_power_state - Set the power state of a PCI device
  * @dev: PCI device to be suspended
@@ -240,11 +382,10 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
  * -EIO if device does not support PCI PM.
  * 0 if we can successfully change the power state.
  */
-
 int
 pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 {
-	int pm;
+	int pm, need_restore = 0;
 	u16 pmcsr, pmc;
 
 	/* bound the state we're entering */
@@ -255,11 +396,21 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 	 * Can enter D0 from any state, but if we can only go deeper
 	 * to sleep if we're already in a low power state
 	 */
-	if (state != PCI_D0 && dev->current_state > state)
+	if (state != PCI_D0 && dev->current_state > state) {
+		printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
+			__FUNCTION__, pci_name(dev), state, dev->current_state);
 		return -EINVAL;
-	else if (dev->current_state == state)
+	} else if (dev->current_state == state)
 		return 0;	/* we're already there */
 
+	/*
+	 * If the device or the parent bridge can't support PCI PM, ignore
+	 * the request if we're doing anything besides putting it into D0
+	 * (which would only happen on boot).
+	 */
+	if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
+		return 0;
+
 	/* find PCI PM capability in list */
 	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 
@@ -268,31 +419,40 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 		return -EIO;
 
 	pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
-	if ((pmc & PCI_PM_CAP_VER_MASK) > 2) {
+	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
 		printk(KERN_DEBUG "PCI: %s has unsupported PM cap regs version (%u)\n",
-			dev->slot_name, pmc & PCI_PM_CAP_VER_MASK);
+			pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
 		return -EIO;
 	}
 
 	/* check if this device supports the desired state */
-	if (state == PCI_D1 || state == PCI_D2) {
-		if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
-			return -EIO;
-		else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
-			return -EIO;
-	}
+	if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
+		return -EIO;
+	else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
+		return -EIO;
 
-	/* If we're in D3, force entire word to 0.
+	pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
+
+	/* If we're (effectively) in D3, force entire word to 0.
 	 * This doesn't affect PME_Status, disables PME_En, and
 	 * sets PowerState to 0.
 	 */
-	if (dev->current_state >= PCI_D3hot)
-		pmcsr = 0;
-	else {
-		pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
+	switch (dev->current_state) {
+	case PCI_D0:
+	case PCI_D1:
+	case PCI_D2:
 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
 		pmcsr |= state;
+		break;
+	case PCI_UNKNOWN: /* Boot-up */
+		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
+		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
+			need_restore = 1;
+		/* Fall-through: force to D0 */
+	default:
+		pmcsr = 0;
+		break;
 	}
 
 	/* enter specified state */
@@ -301,46 +461,168 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 	/* Mandatory power management transition delays */
 	/* see PCI PM 1.1 5.6.1 table 18 */
 	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
-		msleep(10);
+		msleep(pci_pm_d3_delay);
 	else if (state == PCI_D2 || dev->current_state == PCI_D2)
 		udelay(200);
+
+	/*
+	 * Give firmware a chance to be called, such as ACPI _PRx, _PSx
+	 * Firmware method after native method ?
+	 */
+	if (platform_pci_set_power_state)
+		platform_pci_set_power_state(dev, state);
+
 	dev->current_state = state;
 
+	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
+	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
+	 * from D3hot to D0 _may_ perform an internal reset, thereby
+	 * going to "D0 Uninitialized" rather than "D0 Initialized".
+	 * For example, at least some versions of the 3c905B and the
+	 * 3c556B exhibit this behaviour.
+	 *
+	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
+	 * devices in a D3hot state at boot.  Consequently, we need to
+	 * restore at least the BARs so that the device will be
+	 * accessible to its driver.
+	 */
+	if (need_restore)
+		pci_restore_bars(dev);
+
 	return 0;
 }
 
+int (*platform_pci_choose_state)(struct pci_dev *dev, pm_message_t state);
+
 /**
  * pci_choose_state - Choose the power state of a PCI device
  * @dev: PCI device to be suspended
- * @state: target sleep state for the whole system
+ * @state: target sleep state for the whole system. This is the value
+ * that is passed to suspend() function.
  *
  * Returns PCI power state suitable for given device and given system
  * message.
  */
 
-pci_power_t pci_choose_state(struct pci_dev *dev, u32 state)
+pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
 {
+	int ret;
+
 	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
 		return PCI_D0;
 
-	switch (state) {
-	case 0: return PCI_D0;
-	case 2: return PCI_D2;
-	case 3: return PCI_D3hot;
-	default: BUG();
+	if (platform_pci_choose_state) {
+		ret = platform_pci_choose_state(dev, state);
+		if (ret >= 0)
+			state.event = ret;
+	}
+
+	switch (state.event) {
+	case PM_EVENT_ON:
+		return PCI_D0;
+	case PM_EVENT_FREEZE:
+	case PM_EVENT_PRETHAW:
+		/* REVISIT both freeze and pre-thaw "should" use D0 */
+	case PM_EVENT_SUSPEND:
+		return PCI_D3hot;
+	default:
+		printk("Unrecognized suspend event %d\n", state.event);
+		BUG();
 	}
 	return PCI_D0;
 }
 
 EXPORT_SYMBOL(pci_choose_state);
 
+static int pci_save_pcie_state(struct pci_dev *dev)
+{
+	int pos, i = 0;
+	struct pci_cap_saved_state *save_state;
+	u16 *cap;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	if (pos <= 0)
+		return 0;
+
+	save_state = kzalloc(sizeof(*save_state) + sizeof(u16) * 4, GFP_KERNEL);
+	if (!save_state) {
+		dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n");
+		return -ENOMEM;
+	}
+	cap = (u16 *)&save_state->data[0];
+
+	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
+	pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
+	pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
+	pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
+	pci_add_saved_cap(dev, save_state);
+	return 0;
+}
+
+static void pci_restore_pcie_state(struct pci_dev *dev)
+{
+	int i = 0, pos;
+	struct pci_cap_saved_state *save_state;
+	u16 *cap;
+
+	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
+	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+	if (!save_state || pos <= 0)
+		return;
+	cap = (u16 *)&save_state->data[0];
+
+	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
+	pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
+	pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
+	pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
+	pci_remove_saved_cap(save_state);
+	kfree(save_state);
+}
+
+
+static int pci_save_pcix_state(struct pci_dev *dev)
+{
+	int pos, i = 0;
+	struct pci_cap_saved_state *save_state;
+	u16 *cap;
+
+	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+	if (pos <= 0)
+		return 0;
+
+	save_state = kzalloc(sizeof(*save_state) + sizeof(u16), GFP_KERNEL);
+	if (!save_state) {
+		dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n");
+		return -ENOMEM;
+	}
+	cap = (u16 *)&save_state->data[0];
+
+	pci_read_config_word(dev, pos + PCI_X_CMD, &cap[i++]);
+	pci_add_saved_cap(dev, save_state);
+	return 0;
+}
+
+static void pci_restore_pcix_state(struct pci_dev *dev)
+{
+	int i = 0, pos;
+	struct pci_cap_saved_state *save_state;
+	u16 *cap;
+
+	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
+	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+	if (!save_state || pos <= 0)
+		return;
+	cap = (u16 *)&save_state->data[0];
+
+	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
+	pci_remove_saved_cap(save_state);
+	kfree(save_state);
+}
+
+
 /**
  * pci_save_state - save the PCI configuration space of a device before suspending
  * @dev: - PCI device that we're dealing with
- * @buffer: - buffer to hold config space context
- *
- * @buffer must be large enough to hold the entire PCI 2.2 config space
- * (>= 64 bytes).
  */
 int
 pci_save_state(struct pci_dev *dev)
@@ -349,22 +631,48 @@ pci_save_state(struct pci_dev *dev)
 	/* XXX: 100% dword access ok here? */
 	for (i = 0; i < 16; i++)
 		pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
+	if ((i = pci_save_msi_state(dev)) != 0)
+		return i;
+	if ((i = pci_save_msix_state(dev)) != 0)
+		return i;
+	if ((i = pci_save_pcie_state(dev)) != 0)
+		return i;
+	if ((i = pci_save_pcix_state(dev)) != 0)
+		return i;
 	return 0;
 }
 
 /**
  * pci_restore_state - Restore the saved state of a PCI device
  * @dev: - PCI device that we're dealing with
- * @buffer: - saved PCI config space
- *
  */
 int
 pci_restore_state(struct pci_dev *dev)
 {
 	int i;
+	int val;
 
-	for (i = 0; i < 16; i++)
-		pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]);
+	/* PCI Express register must be restored first */
+	pci_restore_pcie_state(dev);
+
+	/*
+	 * The Base Address register should be programmed before the command
+	 * register(s)
+	 */
+	for (i = 15; i >= 0; i--) {
+		pci_read_config_dword(dev, i * 4, &val);
+		if (val != dev->saved_config_space[i]) {
+			printk(KERN_DEBUG "PM: Writing back config space on "
+				"device %s at offset %x (was %x, writing %x)\n",
+				pci_name(dev), i,
+				val, (int)dev->saved_config_space[i]);
+			pci_write_config_dword(dev,i * 4,
+				dev->saved_config_space[i]);
+		}
+	}
+	pci_restore_pcix_state(dev);
+	pci_restore_msi_state(dev);
+	pci_restore_msix_state(dev);
 	return 0;
 }
 
@@ -383,32 +691,60 @@ pci_enable_device_bars(struct pci_dev *dev, int bars)
 {
 	int err;
 
-	pci_set_power_state(dev, PCI_D0);
-	if ((err = pcibios_enable_device(dev, bars)) < 0)
+	err = pci_set_power_state(dev, PCI_D0);
+	if (err < 0 && err != -EIO)
+		return err;
+	err = pcibios_enable_device(dev, bars);
+	if (err < 0)
 		return err;
 	return 0;
 }
 
 /**
- * pci_enable_device - Initialize device before it's used by a driver.
+ * __pci_enable_device - Initialize device before it's used by a driver.
  * @dev: PCI device to be initialized
  *
  * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
+ *
+ * Note this function is a backend and is not supposed to be called by
+ * normal code, use pci_enable_device() instead.
  */
 int
-pci_enable_device(struct pci_dev *dev)
+__pci_enable_device(struct pci_dev *dev)
 {
 	int err;
 
-	dev->is_enabled = 1;
-	if ((err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1)))
+	err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
+	if (err)
 		return err;
 	pci_fixup_device(pci_fixup_enable, dev);
 	return 0;
 }
 
+/**
+ * pci_enable_device - Initialize device before it's used by a driver.
+ * @dev: PCI device to be initialized
+ *
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable I/O and memory. Wake up the device if it was suspended.
+ * Beware, this function can fail.
+ *
+ * Note we don't actually enable the device many times if we call
+ * this function repeatedly (we just increment the count).
+ */
+int pci_enable_device(struct pci_dev *dev)
+{
+	int result;
+	if (atomic_add_return(1, &dev->enable_cnt) > 1)
+		return 0;		/* already enabled */
+	result = __pci_enable_device(dev);
+	if (result < 0)
+		atomic_dec(&dev->enable_cnt);
+	return result;
+}
+
 /**
  * pcibios_disable_device - disable arch specific PCI resources for device dev
  * @dev: the PCI device to disable
@@ -425,20 +761,31 @@ void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
  *
  * Signal to the system that the PCI device is not in use by the system
  * anymore. This only involves disabling PCI bus-mastering, if active.
+ *
+ * Note we don't actually disable the device until all callers of
+ * pci_device_enable() have called pci_device_disable().
  */
 void
 pci_disable_device(struct pci_dev *dev)
 {
 	u16 pci_command;
-
-	dev->is_enabled = 0;
-	dev->is_busmaster = 0;
+
+	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
+		return;
+
+	if (dev->msi_enabled)
+		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
+			PCI_CAP_ID_MSI);
+	if (dev->msix_enabled)
+		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
+			PCI_CAP_ID_MSIX);
 
 	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
 	if (pci_command & PCI_COMMAND_MASTER) {
 		pci_command &= ~PCI_COMMAND_MASTER;
 		pci_write_config_word(dev, PCI_COMMAND, pci_command);
 	}
+	dev->is_busmaster = 0;
 
 	pcibios_disable_device(dev);
 }
@@ -498,7 +845,7 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
 {
 	u8 pin;
 
-	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+	pin = dev->pin;
 	if (!pin)
 		return -1;
 	pin--;
@@ -545,7 +892,7 @@ void pci_release_region(struct pci_dev *pdev, int bar)
  * Returns 0 on success, or %EBUSY on error. A warning
 * message is also printed on failure.
 */
-int pci_request_region(struct pci_dev *pdev, int bar, char *res_name)
+int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
 {
 	if (pci_resource_len(pdev, bar) == 0)
 		return 0;
@@ -564,10 +911,12 @@ int pci_request_region(struct pci_dev *pdev, int bar, char *res_name)
 	return 0;
 
 err_out:
-	printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
+	printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%llx@%llx "
+		"for device %s\n",
 		pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
 		bar + 1, /* PCI BAR # */
-		pci_resource_len(pdev, bar), pci_resource_start(pdev, bar),
+		(unsigned long long)pci_resource_len(pdev, bar),
+		(unsigned long long)pci_resource_start(pdev, bar),
 		pci_name(pdev));
 	return -EBUSY;
 }
@@ -603,7 +952,7 @@ void pci_release_regions(struct pci_dev *pdev)
  * Returns 0 on success, or %EBUSY on error. A warning
 * message is also printed on failure.
 */
-int pci_request_regions(struct pci_dev *pdev, char *res_name)
+int pci_request_regions(struct pci_dev *pdev, const char *res_name)
 {
 	int i;
 
@@ -633,7 +982,7 @@ pci_set_master(struct pci_dev *dev)
 
 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
 	if (! (cmd & PCI_COMMAND_MASTER)) {
-		DBG("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
+		pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
 		cmd |= PCI_COMMAND_MASTER;
 		pci_write_config_word(dev, PCI_COMMAND, cmd);
 	}
@@ -641,22 +990,38 @@ pci_set_master(struct pci_dev *dev)
 	pcibios_set_master(dev);
 }
 
-#ifndef HAVE_ARCH_PCI_MWI
+#ifdef PCI_DISABLE_MWI
+int pci_set_mwi(struct pci_dev *dev)
+{
+	return 0;
+}
+
+void pci_clear_mwi(struct pci_dev *dev)
+{
+}
+
+#else
+
+#ifndef PCI_CACHE_LINE_BYTES
+#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
+#endif
+
 /* This can be overridden by arch code. */
-u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;
+/* Don't forget this is measured in 32-bit words, not bytes */
+u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
 
 /**
- * pci_generic_prep_mwi - helper function for pci_set_mwi
- * @dev: the PCI device for which MWI is enabled
+ * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
+ * @dev: the PCI device for which MWI is to be enabled
  *
- * Helper function for generic implementation of pcibios_prep_mwi
- * function. Originally copied from drivers/net/acenic.c.
+ * Helper function for pci_set_mwi.
+ * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, .
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
 static int
-pci_generic_prep_mwi(struct pci_dev *dev)
+pci_set_cacheline_size(struct pci_dev *dev)
 {
 	u8 cacheline_size;
 
@@ -682,7 +1047,6 @@ pci_generic_prep_mwi(struct pci_dev *dev)
 	return -EINVAL;
 }
 
-#endif /* !HAVE_ARCH_PCI_MWI */
 
 /**
  * pci_set_mwi - enables memory-write-invalidate PCI transaction
@@ -700,18 +1064,13 @@ pci_set_mwi(struct pci_dev *dev)
 	int rc;
 	u16 cmd;
 
-#ifdef HAVE_ARCH_PCI_MWI
-	rc = pcibios_prep_mwi(dev);
-#else
-	rc = pci_generic_prep_mwi(dev);
-#endif
-
+	rc = pci_set_cacheline_size(dev);
 	if (rc)
 		return rc;
 
 	pci_read_config_word(dev, PCI_COMMAND, &cmd);
 	if (! (cmd & PCI_COMMAND_INVALIDATE)) {
-		DBG("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
+		pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
 		cmd |= PCI_COMMAND_INVALIDATE;
 		pci_write_config_word(dev, PCI_COMMAND, cmd);
 	}
@@ -736,6 +1095,32 @@ pci_clear_mwi(struct pci_dev *dev)
 		pci_write_config_word(dev, PCI_COMMAND, cmd);
 	}
 }
+#endif /* ! PCI_DISABLE_MWI */
+
+/**
+ * pci_intx - enables/disables PCI INTx for device dev
+ * @pdev: the PCI device to operate on
+ * @enable: boolean: whether to enable or disable PCI INTx
+ *
+ * Enables/disables PCI INTx for device dev
+ */
+void
+pci_intx(struct pci_dev *pdev, int enable)
+{
+	u16 pci_command, new;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+
+	if (enable) {
+		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
+	} else {
+		new = pci_command | PCI_COMMAND_INTX_DISABLE;
+	}
+
+	if (new != pci_command) {
+		pci_write_config_word(pdev, PCI_COMMAND, new);
+	}
+}
 
 #ifndef HAVE_ARCH_PCI_SET_DMA_MASK
 /*
@@ -752,17 +1137,6 @@ pci_set_dma_mask(struct pci_dev *dev, u64 mask)
 	return 0;
 }
 
-int
-pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
-	if (!pci_dac_dma_supported(dev, mask))
-		return -EIO;
-
-	dev->dma_mask = mask;
-
-	return 0;
-}
-
 int
 pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
 {
@@ -792,29 +1166,33 @@ static int __devinit pci_setup(char *str)
 		if (k)
 			*k++ = 0;
 		if (*str && (str = pcibios_setup(str)) && *str) {
-			/* PCI layer options should be handled here */
-			printk(KERN_ERR "PCI: Unknown option `%s'\n", str);
+			if (!strcmp(str, "nomsi")) {
+				pci_no_msi();
+			} else if (!strcmp(str, "msi")) {
+				pci_yes_msi();
+			} else {
+				printk(KERN_ERR "PCI: Unknown option `%s'\n",
+						str);
+			}
 		}
 		str = k;
 	}
-	return 1;
+	return 0;
 }
+early_param("pci", pci_setup);
 
 device_initcall(pci_init);
 
-__setup("pci=", pci_setup);
-
 #if defined(CONFIG_ISA) || defined(CONFIG_EISA)
 /* FIXME: Some boxes have multiple ISA bridges! */
 struct pci_dev *isa_bridge;
 EXPORT_SYMBOL(isa_bridge);
 #endif
 
+EXPORT_SYMBOL_GPL(pci_restore_bars);
 EXPORT_SYMBOL(pci_enable_device_bars);
 EXPORT_SYMBOL(pci_enable_device);
 EXPORT_SYMBOL(pci_disable_device);
-EXPORT_SYMBOL(pci_max_busnr);
-EXPORT_SYMBOL(pci_bus_max_busnr);
 EXPORT_SYMBOL(pci_find_capability);
 EXPORT_SYMBOL(pci_bus_find_capability);
 EXPORT_SYMBOL(pci_release_regions);
@@ -824,8 +1202,8 @@ EXPORT_SYMBOL(pci_request_region);
 EXPORT_SYMBOL(pci_set_master);
 EXPORT_SYMBOL(pci_set_mwi);
 EXPORT_SYMBOL(pci_clear_mwi);
+EXPORT_SYMBOL_GPL(pci_intx);
 EXPORT_SYMBOL(pci_set_dma_mask);
-EXPORT_SYMBOL(pci_dac_set_dma_mask);
 EXPORT_SYMBOL(pci_set_consistent_dma_mask);
 EXPORT_SYMBOL(pci_assign_resource);
 EXPORT_SYMBOL(pci_find_parent_resource);
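
The patch above reworks several driver-facing entry points: pci_enable_device()/pci_disable_device() become reference counted, pci_choose_state() now takes a pm_message_t instead of a raw u32, pci_request_region()/pci_request_regions() take const name strings, and pci_save_state()/pci_restore_state() additionally handle PCI Express, PCI-X and MSI capability state. The sketch below is not part of the patch; it is a minimal illustration, under those assumptions, of how a driver of this kernel generation would call these interfaces. All names (my_pci_probe, "my_driver", and so on) are placeholders and error handling is trimmed to the essentials.

#include <linux/pci.h>
#include <linux/module.h>

/* Illustrative driver skeleton -- not taken from the patch; names are hypothetical. */

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);	/* nestable: only bumps enable_cnt if already enabled */
	if (err)
		return err;

	err = pci_request_regions(pdev, "my_driver");	/* const char * after this patch */
	if (err) {
		pci_disable_device(pdev);
		return err;
	}

	pci_set_master(pdev);
	return 0;
}

static void my_pci_remove(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);	/* really disabled only when the last user calls this */
}

static int my_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);		/* now also saves PCIe/PCI-X/MSI capability registers */
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}

static int my_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);	/* PCIe state first, BARs before the command register */
	return pci_enable_device(pdev);
}

static struct pci_driver my_pci_driver = {
	.name		= "my_driver",
	/* .id_table omitted for brevity */
	.probe		= my_pci_probe,
	.remove		= my_pci_remove,
	.suspend	= my_pci_suspend,
	.resume		= my_pci_resume,
};

Registration through pci_register_driver(&my_pci_driver) in the module init path is unchanged by this diff; only the semantics of the calls above (nesting, pm_message_t, extended save/restore) differ from the old code.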