2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 2000,2002-2004 Silicon Graphics, Inc. All rights reserved.
8 * Routines for PCI DMA mapping. See Documentation/DMA-mapping.txt for
9 * a description of how these routines should be used.
12 #include <linux/module.h>
13 #include <asm/sn/sn_sal.h>
14 #include "pci/pcibus_provider_defs.h"
15 #include "pci/pcidev.h"
16 #include "pci/pcibr_provider.h"
18 void sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
22 * sn_pci_alloc_consistent - allocate memory for coherent DMA
23 * @hwdev: device to allocate for
24 * @size: size of the region
25 * @dma_handle: DMA (bus) address
27 * pci_alloc_consistent() returns a pointer to a memory region suitable for
28 * coherent DMA traffic to/from a PCI device. On SN platforms, this means
29 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
31 * This interface is usually used for "command" streams (e.g. the command
32 * queue for a SCSI controller). See Documentation/DMA-mapping.txt for
35 * Also known as platform_pci_alloc_consistent() by the IA64 machvec code.
37 void *sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
38 dma_addr_t * dma_handle)
/*
 * NOTE(review): the embedded numeric prefixes and the gaps in their sequence
 * show that interior lines (braces, the cpuaddr declaration, returns) were
 * dropped by the extraction — verify against the original file before use.
 */
41 unsigned long phys_addr;
/* SN-specific per-device and per-bus software state for this pci_dev */
42 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
43 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
/* no SN bus-software descriptor: device is not on a bus we manage */
45 if (bussoft == NULL) {
49 if (! IS_PCI_BRIDGE_ASIC(bussoft->bs_asic_type)) {
50 return NULL; /* unsupported asic type */
54 * Allocate the memory.
55 * FIXME: We should be doing alloc_pages_node for the node closest
/* GFP_ATOMIC: allocation must not sleep on this path */
58 if (!(cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size))))
61 memset(cpuaddr, 0x0, size);
63 /* physical addr. of the memory we just got */
64 phys_addr = __pa(cpuaddr);
67 * 64 bit address translations should never fail.
68 * 32 bit translations can fail if there are insufficient mapping
/* establish the consistent (coherent) bus translation for the region */
72 *dma_handle = pcibr_dma_map(pcidev_info, phys_addr, size, SN_PCIDMA_CONSISTENT);
/* mapping failed: log the failure and release the pages just allocated */
75 "sn_pci_alloc_consistent(): failed *dma_handle = 0x%lx hwdev->dev.coherent_dma_mask = 0x%lx \n",
76 *dma_handle, hwdev->dev.coherent_dma_mask);
77 free_pages((unsigned long)cpuaddr, get_order(size));
85 * sn_pci_free_consistent - free memory associated with coherent DMAable region
86 * @hwdev: device to free for
 * @size: size of the region (missing from the extracted kernel-doc; restored)
88 * @vaddr: kernel virtual address to free
89 * @dma_handle: DMA address associated with this region
91 * Frees the memory allocated by pci_alloc_consistent(). Also known
92 * as platform_pci_free_consistent() by the IA64 machvec code.
95 sn_pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
96 dma_addr_t dma_handle)
98 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
99 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
/* tear down the bus translation first, then return the pages */
105 pcibr_dma_unmap(pcidev_info, dma_handle, 0);
106 free_pages((unsigned long)vaddr, get_order(size));
110 * sn_pci_map_sg - map a scatter-gather list for DMA
111 * @hwdev: device to map for
112 * @sg: scatterlist to map
113 * @nents: number of entries
114 * @direction: direction of the DMA transaction
116 * Maps each entry of @sg for DMA. Also known as platform_pci_map_sg by the
120 sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
125 unsigned long phys_addr;
/* remember the list head so a partial failure can unwind from the start */
126 struct scatterlist *saved_sg = sg;
127 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
128 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
130 /* can't go anywhere w/o a direction in life */
131 if (direction == PCI_DMA_NONE)
138 /* SN cannot support DMA addresses smaller than 32 bits. */
/*
 * NOTE(review): 0x7fffffff is a 31-bit value, so this rejects masks below
 * 31 bits despite the "32 bits" wording above — confirm which is intended.
 */
139 if (hwdev->dma_mask < 0x7fffffff)
143 * Setup a DMA address for each entry in the
146 for (i = 0; i < nents; i++, sg++) {
148 __pa((unsigned long)page_address(sg->page) + sg->offset);
149 sg->dma_address = pcibr_dma_map(pcidev_info, phys_addr, sg->length, 0);
151 if (!sg->dma_address) {
152 printk(KERN_ERR "sn_pci_map_sg: Unable to allocate "
153 "anymore page map entries.\n");
155 * We will need to free all previously allocated entries.
/* unwind the i entries already mapped before failing */
158 sn_pci_unmap_sg(hwdev, saved_sg, i, direction);
163 sg->dma_length = sg->length;
171 * sn_pci_unmap_sg - unmap a scatter-gather list
172 * @hwdev: device to unmap
173 * @sg: scatterlist to unmap
174 * @nents: number of scatterlist entries
175 * @direction: DMA direction
177 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
178 * concerning calls here are the same as for pci_unmap_single() below. Also
179 * known as platform_pci_unmap_sg() by the IA64 machvec code.
182 sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
186 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
187 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
189 /* can't go anywhere w/o a direction in life */
190 if (direction == PCI_DMA_NONE)
/* release each entry's bus translation and clear its handle */
197 for (i = 0; i < nents; i++, sg++) {
198 pcibr_dma_unmap(pcidev_info, sg->dma_address, direction);
/* dma_address is an integer type; 0 would be the cleaner sentinel here */
199 sg->dma_address = (dma_addr_t) NULL;
205 * sn_pci_map_single - map a single region for DMA
206 * @hwdev: device to map for
207 * @ptr: kernel virtual address of the region to map
208 * @size: size of the region
209 * @direction: DMA direction
211 * Map the region pointed to by @ptr for DMA and return the
212 * DMA address. Also known as platform_pci_map_single() by
213 * the IA64 machvec code.
215 * We map this to the one step pcibr_dmamap_trans interface rather than
216 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
217 * no way of saving the dmamap handle from the alloc to later free
218 * (which is pretty much unacceptable).
220 * TODO: simplify our interface;
221 * get rid of dev_desc and vhdl (seems redundant given a pci_dev);
222 * figure out how to save dmamap handle so can use two step.
225 sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
228 unsigned long phys_addr;
229 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
230 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
/* a direction is mandatory for streaming mappings */
232 if (direction == PCI_DMA_NONE)
235 if (bussoft == NULL) {
239 if (! IS_PCI_BRIDGE_ASIC(bussoft->bs_asic_type)) {
240 return 0; /* unsupported asic type */
243 /* SN cannot support DMA addresses smaller than 32 bits. */
/* NOTE(review): 0x7fffffff is a 31-bit mask; see same check in map_sg */
244 if (hwdev->dma_mask < 0x7fffffff)
248 * Call our dmamap interface
251 phys_addr = __pa(ptr);
/* flags == 0: plain streaming mapping (no SN_PCIDMA_CONSISTENT) */
252 dma_addr = pcibr_dma_map(pcidev_info, phys_addr, size, 0);
254 printk(KERN_ERR "pci_map_single: Unable to allocate anymore "
255 "page map entries.\n");
258 return ((dma_addr_t) dma_addr);
262 * sn_pci_dma_sync_single_* - make sure all DMAs or CPU accesses
264 * @hwdev: device to sync
265 * @dma_handle: DMA address to sync
266 * @size: size of region
267 * @direction: DMA direction
269 * This routine is supposed to sync the DMA region specified
270 * by @dma_handle into the 'coherence domain'. We do not need to do
271 * anything on our platform.
/*
 * NOTE(review): the kernel-doc above describes the dma_sync routines, yet
 * the function that follows is sn_pci_unmap_single — the unmap kernel-doc
 * appears to have been elided by the extraction. sn_pci_unmap_single tears
 * down a streaming mapping created by sn_pci_map_single.
 */
274 sn_pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size,
277 struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
278 struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
280 if (direction == PCI_DMA_NONE)
283 if (bussoft == NULL) {
287 if (! IS_PCI_BRIDGE_ASIC(bussoft->bs_asic_type)) {
288 return; /* unsupported asic type */
291 pcibr_dma_unmap(pcidev_info, dma_addr, direction);
295 * sn_dma_supported - test a DMA mask
296 * @hwdev: device to test
297 * @mask: DMA mask to test
299 * Return whether the given PCI device DMA address mask can be supported
300 * properly. For example, if your device can only drive the low 24-bits
301 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
302 * this function. Of course, SN only supports devices that have 32 or more
303 * address bits when using the PMU. We could theoretically support <32 bit
304 * cards using direct mapping, but we'll worry about that later--on the off
305 * chance that someone actually wants to use such a card.
/* NOTE(review): kernel-doc title says sn_dma_supported but the function is
 * sn_pci_dma_supported — likely a stale doc header; confirm upstream. */
307 int sn_pci_dma_supported(struct pci_dev *hwdev, u64 mask)
309 if (mask < 0x7fffffff)
315 * New generic DMA routines just wrap sn2 PCI routines until we
316 * support other bus types (if ever).
/* generic-device wrapper: only PCI devices are supported on this platform */
319 int sn_dma_supported(struct device *dev, u64 mask)
321 BUG_ON(dev->bus != &pci_bus_type);
323 return sn_pci_dma_supported(to_pci_dev(dev), mask);
326 EXPORT_SYMBOL(sn_dma_supported);
/* set the device's DMA mask after validating it via sn_dma_supported() */
328 int sn_dma_set_mask(struct device *dev, u64 dma_mask)
330 BUG_ON(dev->bus != &pci_bus_type);
332 if (!sn_dma_supported(dev, dma_mask))
335 *dev->dma_mask = dma_mask;
339 EXPORT_SYMBOL(sn_dma_set_mask);
/* generic wrapper over sn_pci_alloc_consistent(); @flag (gfp) is unused here */
341 void *sn_dma_alloc_coherent(struct device *dev, size_t size,
342 dma_addr_t * dma_handle, int flag)
344 BUG_ON(dev->bus != &pci_bus_type);
346 return sn_pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
349 EXPORT_SYMBOL(sn_dma_alloc_coherent);
/* generic wrapper over sn_pci_free_consistent() */
352 sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
353 dma_addr_t dma_handle)
355 BUG_ON(dev->bus != &pci_bus_type);
357 sn_pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
360 EXPORT_SYMBOL(sn_dma_free_coherent);
/* generic wrapper over sn_pci_map_single() */
363 sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
366 BUG_ON(dev->bus != &pci_bus_type);
368 return sn_pci_map_single(to_pci_dev(dev), cpu_addr, size,
372 EXPORT_SYMBOL(sn_dma_map_single);
/* generic wrapper over sn_pci_unmap_single() */
375 sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
378 BUG_ON(dev->bus != &pci_bus_type);
380 sn_pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
383 EXPORT_SYMBOL(sn_dma_unmap_single);
/*
 * Generic wrapper for page-based mapping. Note it delegates to the generic
 * pci_map_page() rather than an sn_pci_* helper like its siblings do.
 */
386 sn_dma_map_page(struct device *dev, struct page *page,
387 unsigned long offset, size_t size, int direction)
389 BUG_ON(dev->bus != &pci_bus_type);
391 return pci_map_page(to_pci_dev(dev), page, offset, size,
395 EXPORT_SYMBOL(sn_dma_map_page);
/* generic wrapper over pci_unmap_page() (matches sn_dma_map_page above) */
398 sn_dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
401 BUG_ON(dev->bus != &pci_bus_type);
403 pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
406 EXPORT_SYMBOL(sn_dma_unmap_page);
/* generic wrapper over sn_pci_map_sg() */
409 sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
412 BUG_ON(dev->bus != &pci_bus_type);
414 return sn_pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
417 EXPORT_SYMBOL(sn_dma_map_sg);
/* generic wrapper over sn_pci_unmap_sg() */
420 sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
423 BUG_ON(dev->bus != &pci_bus_type);
425 sn_pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
428 EXPORT_SYMBOL(sn_dma_unmap_sg);
/* sync is a no-op on this platform (see the sync kernel-doc earlier in file) */
431 sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
432 size_t size, int direction)
434 BUG_ON(dev->bus != &pci_bus_type);
437 EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
/* sync is a no-op on this platform (see the sync kernel-doc earlier in file) */
440 sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
441 size_t size, int direction)
443 BUG_ON(dev->bus != &pci_bus_type);
446 EXPORT_SYMBOL(sn_dma_sync_single_for_device);
/* sync is a no-op on this platform (see the sync kernel-doc earlier in file) */
449 sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
452 BUG_ON(dev->bus != &pci_bus_type);
455 EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
/* sync is a no-op on this platform (see the sync kernel-doc earlier in file) */
458 sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
459 int nelems, int direction)
461 BUG_ON(dev->bus != &pci_bus_type);
/*
 * NOTE(review): sn_dma_mapping_error's body and the sync_sg_for_device
 * closing brace were elided by the extraction; the EXPORT_SYMBOL for
 * sn_dma_sync_sg_for_device appears below for that reason.
 */
464 int sn_dma_mapping_error(dma_addr_t dma_addr)
469 EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
/* exported PCI-layer entry points used by the IA64 machvec code */
470 EXPORT_SYMBOL(sn_pci_unmap_single);
471 EXPORT_SYMBOL(sn_pci_map_single);
472 EXPORT_SYMBOL(sn_pci_map_sg);
473 EXPORT_SYMBOL(sn_pci_unmap_sg);
474 EXPORT_SYMBOL(sn_pci_alloc_consistent);
475 EXPORT_SYMBOL(sn_pci_free_consistent);
476 EXPORT_SYMBOL(sn_pci_dma_supported);
477 EXPORT_SYMBOL(sn_dma_mapping_error);