dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
}
+EXPORT_SYMBOL(pci_map_single);
dma_addr_t
pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
return pci_map_single_1(pdev, (char *)page_address(page) + offset,
size, dac_allowed);
}
+EXPORT_SYMBOL(pci_map_page);
/* Unmap a single streaming mode DMA translation. The DMA_ADDR and
SIZE must match what was provided for in a previous pci_map_single
DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
dma_addr, size, npages, __builtin_return_address(0));
}
+EXPORT_SYMBOL(pci_unmap_single);
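For reference, streaming mappings always come in map/unmap pairs, and the unmap must use the same address and size as the map. A minimal driver-side sketch, assuming illustrative names my_pdev, buf, and BUF_SIZE (none of which are part of this patch):

	dma_addr_t bus;

	bus = pci_map_single(my_pdev, buf, BUF_SIZE, PCI_DMA_FROMDEVICE);
	/* ... the device DMAs into the buffer ... */
	pci_unmap_single(my_pdev, bus, BUF_SIZE, PCI_DMA_FROMDEVICE);
	/* CPU reads of buf now see what the device wrote. */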
void
pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
{
pci_unmap_single(pdev, dma_addr, size, direction);
}
+EXPORT_SYMBOL(pci_unmap_page);
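pci_map_page()/pci_unmap_page() are the page-based flavor of the same pairing; as the hunk above shows, the Alpha page variant just resolves the kernel virtual address and reuses the single-buffer path. A hypothetical fragment (pg, off, and len are illustrative):

	dma_addr_t bus;

	bus = pci_map_page(my_pdev, pg, off, len, PCI_DMA_TODEVICE);
	/* ... the device reads len bytes via DMA ... */
	pci_unmap_page(my_pdev, bus, len, PCI_DMA_TODEVICE);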
/* Allocate and map kernel buffer using consistent mode DMA for PCI
device. Returns non-NULL cpu-view pointer to the buffer if
{
void *cpu_addr;
long order = get_order(size);
- int gfp = GFP_ATOMIC;
+ gfp_t gfp = GFP_ATOMIC;
try_again:
cpu_addr = (void *)__get_free_pages(gfp, order);
return cpu_addr;
}
+EXPORT_SYMBOL(pci_alloc_consistent);
/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must
be values that were returned from pci_alloc_consistent. SIZE must
DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
dma_addr, size, __builtin_return_address(0));
}
-
+EXPORT_SYMBOL(pci_free_consistent);
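The consistent API is likewise paired: the driver gets a CPU pointer and a bus address for the same memory, and because the memory is coherent no explicit syncs are needed. A sketch, with my_pdev and RING_BYTES as illustrative stand-ins:

	dma_addr_t ring_dma;
	void *ring;

	ring = pci_alloc_consistent(my_pdev, RING_BYTES, &ring_dma);
	if (!ring)
		return -ENOMEM;
	/* CPU uses ring; the device is handed ring_dma. */
	pci_free_consistent(my_pdev, RING_BYTES, ring, ring_dma);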
/* Classify the elements of the scatterlist. Write dma_address
of each element with:
pci_unmap_sg(pdev, start, out - start, direction);
return 0;
}
+EXPORT_SYMBOL(pci_map_sg);
/* Unmap a set of streaming mode DMA translations. Again, cpu read
rules concerning calls here are the same as for pci_unmap_single()
DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
}
+EXPORT_SYMBOL(pci_unmap_sg);
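A hedged sketch of the scatter-gather pairing, using the 2.6-era scatterlist fields (page/offset/length) and an illustrative hw_queue() helper. Note that pci_unmap_sg() takes the original nents, not the possibly smaller count that pci_map_sg() returned after coalescing entries:

	struct scatterlist sg[2];
	int i, n;

	sg[0].page = pg0; sg[0].offset = 0; sg[0].length = PAGE_SIZE;
	sg[1].page = pg1; sg[1].offset = 0; sg[1].length = PAGE_SIZE;

	n = pci_map_sg(my_pdev, sg, 2, PCI_DMA_TODEVICE);
	for (i = 0; i < n; i++)
		hw_queue(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
	pci_unmap_sg(my_pdev, sg, 2, PCI_DMA_TODEVICE);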
/* Return whether the given PCI device DMA address mask can be
return 0;
}
+EXPORT_SYMBOL(pci_dma_supported);
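Drivers normally reach pci_dma_supported() through pci_set_dma_mask(), which fails when the platform cannot honor the mask. For example, for a device that can only drive 24 address lines:

	if (pci_set_dma_mask(my_pdev, 0x00ffffff))
		return -EIO;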
/*
return ok;
}
+EXPORT_SYMBOL(pci_dac_dma_supported);
dma64_addr_t
pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page,
+ __pa(page_address(page))
+ (dma64_addr_t) offset);
}
+EXPORT_SYMBOL(pci_dac_page_to_dma);
struct page *
pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
unsigned long paddr = (dma_addr & PAGE_MASK) - alpha_mv.pci_dac_offset;
return virt_to_page(__va(paddr));
}
+EXPORT_SYMBOL(pci_dac_dma_to_page);
unsigned long
pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
{
return (dma_addr & ~PAGE_MASK);
}
+EXPORT_SYMBOL(pci_dac_dma_to_offset);
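Taken together, the DAC routines let a driver hand the device a 64-bit direct-mapped bus address for any page and later recover the page and offset from that address. A sketch under the same illustrative names as above:

	dma64_addr_t bus;

	if (pci_dac_dma_supported(my_pdev, my_pdev->dma_mask)) {
		bus = pci_dac_page_to_dma(my_pdev, pg, 0, PCI_DMA_TODEVICE);
		/* ... later, invert the translation: */
		BUG_ON(pci_dac_dma_to_page(my_pdev, bus) != pg);
		BUG_ON(pci_dac_dma_to_offset(my_pdev, bus) != 0);
	}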
+
+/* Helper for generic DMA-mapping functions. */
+
+struct pci_dev *
+alpha_gendev_to_pci(struct device *dev)
+{
+ if (dev && dev->bus == &pci_bus_type)
+ return to_pci_dev(dev);
+
+ /* Assume that non-PCI devices asking for DMA are either ISA or
+ EISA; BUG() otherwise. */
+ BUG_ON(!isa_bridge);
+
+ /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
+ bridge is bus master then). */
+ if (!dev || !dev->dma_mask || !*dev->dma_mask)
+ return isa_bridge;
+
+ /* For EISA bus masters, return isa_bridge (it might have smaller
+ dma_mask due to wiring limitations). */
+ if (*dev->dma_mask >= isa_bridge->dma_mask)
+ return isa_bridge;
+
+ /* This assumes an ISA bus master with dma_mask 0xffffff; returning
+ NULL makes the pci_* routines fall back to their ISA handling
+ (a NULL pdev selects the ISA hose and the 24-bit ISA DMA mask). */
+ return NULL;
+}
+EXPORT_SYMBOL(alpha_gendev_to_pci);
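The point of exporting this helper is that the bus-generic dma_* entry points can funnel every caller into the PCI implementation. The wrappers in include/asm-alpha/dma-mapping.h look roughly like this (a sketch, not verbatim):

	#define dma_map_single(dev, va, size, dir) \
		pci_map_single(alpha_gendev_to_pci(dev), va, size, dir)
	#define dma_unmap_single(dev, addr, size, dir) \
		pci_unmap_single(alpha_gendev_to_pci(dev), addr, size, dir)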
+
+int
+dma_set_mask(struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask ||
+ !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+EXPORT_SYMBOL(dma_set_mask);
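With alpha_gendev_to_pci() underneath it, dma_set_mask() works for PCI, ISA, and EISA callers alike. For instance, a driver whose device can only address 16 MB might do (dev being its struct device):

	if (dma_set_mask(dev, 0x00ffffff))
		return -EIO;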