diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index bdccd0b1e..ce49fe3a3 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -19,7 +19,6 @@
 **
 */
 
-#include <linux/config.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -76,7 +75,7 @@
 ** If a device prefetches beyond the end of a valid pdir entry, it will cause
 ** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
 ** disconnect on 4k boundaries and prevent such issues. If the device is
-** particularly agressive, this option will keep the entire pdir valid such
+** particularly aggressive, this option will keep the entire pdir valid such
 ** that prefetching will hit a valid address. This could severely impact
 ** error containment, and is therefore off by default. The page that is
 ** used for spill-over is poisoned, so that should help debugging somewhat.
@@ -259,10 +258,10 @@ static u64 prefetch_spill_page;
 
 /*
 ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
-** (or rather not merge) DMA's into managable chunks.
+** (or rather not merge) DMAs into manageable chunks.
 ** On parisc, this is more of the software/tuning constraint
-** rather than the HW. I/O MMU allocation alogorithms can be
-** faster with smaller size is (to some degree).
+** rather than the HW. I/O MMU allocation algorithms can be
+** faster with smaller sizes (to some degree).
 */
 #define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size)
 
@@ -1673,15 +1672,13 @@ ioc_sac_init(struct ioc *ioc)
 	 * SAC (single address cycle) addressable, so allocate a
 	 * pseudo-device to enforce that.
 	 */
-	sac = kmalloc(sizeof(*sac), GFP_KERNEL);
+	sac = kzalloc(sizeof(*sac), GFP_KERNEL);
 	if (!sac)
 		panic(PFX "Couldn't allocate struct pci_dev");
-	memset(sac, 0, sizeof(*sac));
 
-	controller = kmalloc(sizeof(*controller), GFP_KERNEL);
+	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
 	if (!controller)
 		panic(PFX "Couldn't allocate struct pci_controller");
-	memset(controller, 0, sizeof(*controller));
 
 	controller->iommu = ioc;
 	sac->sysdata = controller;
@@ -1738,12 +1735,10 @@ ioc_init(u64 hpa, void *handle)
 	struct ioc *ioc;
 	struct ioc_iommu *info;
 
-	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
 	if (!ioc)
 		return NULL;
 
-	memset(ioc, 0, sizeof(*ioc));
-
 	ioc->next = ioc_list;
 	ioc_list = ioc;
 
@@ -1958,7 +1953,7 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
 	if (pxm < 0)
 		return;
 
-	node = pxm_to_nid_map[pxm];
+	node = pxm_to_node(pxm);
 
 	if (node >= MAX_NUMNODES || !node_online(node))
 		return;
@@ -1999,7 +1994,7 @@ acpi_sba_ioc_add(struct acpi_device *device)
 		if (!iovp_shift)
 			iovp_shift = min(PAGE_SHIFT, 16);
 	}
-	ACPI_MEM_FREE(dev_info);
+	kfree(dev_info);
 
 	/*
 	 * default anything not caught above or specified on cmdline to 4k
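
Note: the allocation hunks above replace the two-step kmalloc() + memset()
pattern with kzalloc(), which returns already-zeroed memory in a single call.
A minimal sketch of the equivalence follows; struct foo and the foo_alloc_*()
helpers are hypothetical and not part of sba_iommu.c:

/* Hypothetical illustration of the conversion applied in the diff above. */
#include <linux/slab.h>		/* kmalloc(), kzalloc(), GFP_KERNEL */
#include <linux/string.h>	/* memset() */

struct foo {
	int count;
	void *data;
};

/* Old style: allocate, then zero the buffer by hand. */
static struct foo *foo_alloc_old(void)
{
	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;
	memset(p, 0, sizeof(*p));
	return p;
}

/* New style: kzalloc() allocates and zeroes in one call. */
static struct foo *foo_alloc_new(void)
{
	return kzalloc(sizeof(struct foo), GFP_KERNEL);
}

Besides dropping a line per call site, the kzalloc() form cannot forget the
zeroing step, so fields added to the structure later are initialized to zero
automatically.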
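
Note: the last two hunks track kernel API changes rather than cleanups.
pxm_to_node() is the generic accessor for translating an ACPI proximity
domain (PXM) to a Linux NUMA node id, replacing direct indexing of the older
per-architecture pxm_to_nid_map[] array. Likewise, the dev_info buffer
obtained from ACPI earlier in acpi_sba_ioc_add() is now released with plain
kfree() rather than the ACPI-internal ACPI_MEM_FREE() wrapper macro,
reflecting ACPICA's move to the kernel's regular allocator.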