2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 2000,2002-2003 Silicon Graphics, Inc. All rights reserved.
8 * Routines for PCI DMA mapping. See Documentation/DMA-mapping.txt for
9 * a description of how these routines should be used.
12 #include <linux/module.h>
13 #include <asm/sn/pci/pci_bus_cvlink.h>
18 pciio_dmamap_t get_free_pciio_dmamap(vertex_hdl_t);
19 void free_pciio_dmamap(pcibr_dmamap_t);
20 static struct pcibr_dmamap_s *find_sn_dma_map(dma_addr_t, unsigned char);
21 void sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
26 extern vertex_hdl_t busnum_to_pcibr_vhdl[];
27 extern nasid_t busnum_to_nid[];
28 extern void * busnum_to_atedmamaps[];
31 * get_free_pciio_dmamap - find and allocate an ATE
32 * @pci_bus: PCI bus to get an entry for
34 * Finds and allocates an ATE on the PCI bus specified
38 get_free_pciio_dmamap(vertex_hdl_t pci_bus)
41 struct pcibr_dmamap_s *sn_dma_map = NULL;
44 * Darn, we need to get the maps allocated for this bus.
46 for (i = 0; i < MAX_PCI_XWIDGET; i++) {
47 if (busnum_to_pcibr_vhdl[i] == pci_bus) {
48 sn_dma_map = busnum_to_atedmamaps[i];
53 * Now get a free dmamap entry from this list.
55 for (i = 0; i < MAX_ATE_MAPS; i++, sn_dma_map++) {
56 if (!sn_dma_map->bd_dma_addr) {
57 sn_dma_map->bd_dma_addr = -1;
58 return( (pciio_dmamap_t) sn_dma_map );
66 * free_pciio_dmamap - free an ATE
67 * @dma_map: ATE to free
69 * Frees the ATE specified by @dma_map.
72 free_pciio_dmamap(pcibr_dmamap_t dma_map)
74 dma_map->bd_dma_addr = 0;
78 * find_sn_dma_map - find an ATE associated with @dma_addr and @busnum
79 * @dma_addr: DMA address to look for
80 * @busnum: PCI bus to look on
82 * Finds the ATE associated with @dma_addr and @busnum.
84 static struct pcibr_dmamap_s *
85 find_sn_dma_map(dma_addr_t dma_addr, unsigned char busnum)
88 struct pcibr_dmamap_s *sn_dma_map = NULL;
91 sn_dma_map = busnum_to_atedmamaps[busnum];
93 for (i = 0; i < MAX_ATE_MAPS; i++, sn_dma_map++) {
94 if (sn_dma_map->bd_dma_addr == dma_addr) {
103 * sn_pci_alloc_consistent - allocate memory for coherent DMA
104 * @hwdev: device to allocate for
105 * @size: size of the region
106 * @dma_handle: DMA (bus) address
108 * pci_alloc_consistent() returns a pointer to a memory region suitable for
109 * coherent DMA traffic to/from a PCI device. On SN platforms, this means
110 * that @dma_handle will have the %PCIIO_DMA_CMD flag set.
112 * This interface is usually used for "command" streams (e.g. the command
113 * queue for a SCSI controller). See Documentation/DMA-mapping.txt for
116 * Also known as platform_pci_alloc_consistent() by the IA64 machvec code.
119 sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
123 struct sn_device_sysdata *device_sysdata;
124 unsigned long phys_addr;
125 pcibr_dmamap_t dma_map = 0;
128 * Get hwgraph vertex for the device
130 device_sysdata = SN_DEVICE_SYSDATA(hwdev);
131 vhdl = device_sysdata->vhdl;
134 * Allocate the memory.
135 * FIXME: We should be doing alloc_pages_node for the node closest
138 if (!(cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size))))
141 memset(cpuaddr, 0x0, size);
143 /* physical addr. of the memory we just got */
144 phys_addr = __pa(cpuaddr);
147 * 64 bit address translations should never fail.
148 * 32 bit translations can fail if there are insufficient mapping
149 * resources and the direct map is already wired to a different
151 * 32 bit translations can also return a > 32 bit address, because
152 * pcibr_dmatrans_addr ignores a missing PCIIO_DMA_A64 flag on
155 if (hwdev->dev.coherent_dma_mask == ~0UL)
156 *dma_handle = pcibr_dmatrans_addr(vhdl, NULL, phys_addr, size,
157 PCIIO_DMA_CMD | PCIIO_DMA_A64);
159 dma_map = pcibr_dmamap_alloc(vhdl, NULL, size, PCIIO_DMA_CMD |
160 MINIMAL_ATE_FLAG(phys_addr, size));
162 *dma_handle = (dma_addr_t)
163 pcibr_dmamap_addr(dma_map, phys_addr, size);
164 dma_map->bd_dma_addr = *dma_handle;
167 *dma_handle = pcibr_dmatrans_addr(vhdl, NULL, phys_addr, size,
172 if (!*dma_handle || *dma_handle > hwdev->dev.coherent_dma_mask) {
174 pcibr_dmamap_done(dma_map);
175 pcibr_dmamap_free(dma_map);
177 free_pages((unsigned long) cpuaddr, get_order(size));
185 * sn_pci_free_consistent - free memory associated with coherent DMAable region
186 * @hwdev: device to free for
187 * @size: size to free
188 * @vaddr: kernel virtual address to free
189 * @dma_handle: DMA address associated with this region
191 * Frees the memory allocated by pci_alloc_consistent(). Also known
192 * as platform_pci_free_consistent() by the IA64 machvec code.
195 sn_pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
197 struct pcibr_dmamap_s *dma_map = NULL;
200 * Get the sn_dma_map entry.
202 if (IS_PCI32_MAPPED(dma_handle))
203 dma_map = find_sn_dma_map(dma_handle, hwdev->bus->number);
206 * and free it if necessary...
209 pcibr_dmamap_done(dma_map);
210 pcibr_dmamap_free(dma_map);
212 free_pages((unsigned long) vaddr, get_order(size));
216 * sn_pci_map_sg - map a scatter-gather list for DMA
217 * @hwdev: device to map for
218 * @sg: scatterlist to map
219 * @nents: number of entries
220 * @direction: direction of the DMA transaction
222 * Maps each entry of @sg for DMA. Also known as platform_pci_map_sg by the
226 sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
230 unsigned long phys_addr;
231 struct sn_device_sysdata *device_sysdata;
232 pcibr_dmamap_t dma_map;
233 struct scatterlist *saved_sg = sg;
236 /* can't go anywhere w/o a direction in life */
237 if (direction == PCI_DMA_NONE)
241 * Get the hwgraph vertex for the device
243 device_sysdata = SN_DEVICE_SYSDATA(hwdev);
244 vhdl = device_sysdata->vhdl;
247 * 64 bit DMA mask can use direct translations
249 * 32 bit DMA mask might be able to use direct, otherwise use dma map
251 * only 64 bit DMA mask supported; both direct and dma map will fail
253 if (hwdev->dma_mask == ~0UL)
254 dma_flag = PCIIO_DMA_DATA | PCIIO_DMA_A64;
256 dma_flag = PCIIO_DMA_DATA;
259 * Setup a DMA address for each entry in the
262 for (i = 0; i < nents; i++, sg++) {
263 phys_addr = __pa((unsigned long)page_address(sg->page) + sg->offset);
264 sg->dma_address = pcibr_dmatrans_addr(vhdl, NULL, phys_addr,
265 sg->length, dma_flag);
266 if (sg->dma_address) {
267 sg->dma_length = sg->length;
271 dma_map = pcibr_dmamap_alloc(vhdl, NULL, sg->length,
272 PCIIO_DMA_DATA|MINIMAL_ATE_FLAG(phys_addr, sg->length));
274 printk(KERN_ERR "sn_pci_map_sg: Unable to allocate "
275 "anymore 32 bit page map entries.\n");
277 * We will need to free all previously allocated entries.
280 sn_pci_unmap_sg(hwdev, saved_sg, i, direction);
285 sg->dma_address = pcibr_dmamap_addr(dma_map, phys_addr, sg->length);
286 sg->dma_length = sg->length;
287 dma_map->bd_dma_addr = sg->dma_address;
295 * sn_pci_unmap_sg - unmap a scatter-gather list
296 * @hwdev: device to unmap
297 * @sg: scatterlist to unmap
298 * @nents: number of scatterlist entries
299 * @direction: DMA direction
301 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
302 * concerning calls here are the same as for pci_unmap_single() below. Also
303 * known as sn_pci_unmap_sg() by the IA64 machvec code.
306 sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
309 struct pcibr_dmamap_s *dma_map;
311 /* can't go anywhere w/o a direction in life */
312 if (direction == PCI_DMA_NONE)
315 for (i = 0; i < nents; i++, sg++){
317 if (IS_PCI32_MAPPED(sg->dma_address)) {
318 dma_map = find_sn_dma_map(sg->dma_address, hwdev->bus->number);
320 pcibr_dmamap_done(dma_map);
321 pcibr_dmamap_free(dma_map);
325 sg->dma_address = (dma_addr_t)NULL;
331 * sn_pci_map_single - map a single region for DMA
332 * @hwdev: device to map for
333 * @ptr: kernel virtual address of the region to map
334 * @size: size of the region
335 * @direction: DMA direction
337 * Map the region pointed to by @ptr for DMA and return the
338 * DMA address. Also known as platform_pci_map_single() by
339 * the IA64 machvec code.
341 * We map this to the one step pcibr_dmamap_trans interface rather than
342 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
343 * no way of saving the dmamap handle from the alloc to later free
344 * (which is pretty much unacceptable).
346 * TODO: simplify our interface;
347 * get rid of dev_desc and vhdl (seems redundant given a pci_dev);
348 * figure out how to save dmamap handle so can use two step.
351 sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
355 unsigned long phys_addr;
356 struct sn_device_sysdata *device_sysdata;
357 pcibr_dmamap_t dma_map = NULL;
360 if (direction == PCI_DMA_NONE)
364 * find vertex for the device
366 device_sysdata = SN_DEVICE_SYSDATA(hwdev);
367 vhdl = device_sysdata->vhdl;
369 phys_addr = __pa(ptr);
371 * 64 bit DMA mask can use direct translations
373 * 32 bit DMA mask might be able to use direct, otherwise use dma map
375 * only 64 bit DMA mask supported; both direct and dma map will fail
377 if (hwdev->dma_mask == ~0UL)
378 dma_flag = PCIIO_DMA_DATA | PCIIO_DMA_A64;
380 dma_flag = PCIIO_DMA_DATA;
382 dma_addr = pcibr_dmatrans_addr(vhdl, NULL, phys_addr, size, dma_flag);
387 * It's a 32 bit card and we cannot do direct mapping so
388 * let's use the PMU instead.
391 dma_map = pcibr_dmamap_alloc(vhdl, NULL, size, PCIIO_DMA_DATA |
392 MINIMAL_ATE_FLAG(phys_addr, size));
394 /* PMU out of entries */
398 dma_addr = (dma_addr_t) pcibr_dmamap_addr(dma_map, phys_addr, size);
399 dma_map->bd_dma_addr = dma_addr;
401 return ((dma_addr_t)dma_addr);
405 * sn_pci_unmap_single - unmap a region used for DMA
406 * @hwdev: device to unmap
407 * @dma_addr: DMA address to unmap
408 * @size: size of region
409 * @direction: DMA direction
411 * Unmaps the region pointed to by @dma_addr. Also known as
412 * platform_pci_unmap_single() by the IA64 machvec code.
415 sn_pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
417 struct pcibr_dmamap_s *dma_map = NULL;
419 if (direction == PCI_DMA_NONE)
423 * Get the sn_dma_map entry.
425 if (IS_PCI32_MAPPED(dma_addr))
426 dma_map = find_sn_dma_map(dma_addr, hwdev->bus->number);
429 * and free it if necessary...
432 pcibr_dmamap_done(dma_map);
433 pcibr_dmamap_free(dma_map);
438 * sn_pci_dma_sync_single_* - make sure all DMAs or CPU accesses
440 * @hwdev: device to sync
441 * @dma_handle: DMA address to sync
442 * @size: size of region
443 * @direction: DMA direction
445 * This routine is supposed to sync the DMA region specified
446 * by @dma_handle into the 'coherence domain'. We do not need to do
447 * anything on our platform.
450 sn_pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
456 sn_pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
/**
 * sn_pci_dma_sync_sg_* - make sure all DMAs or CPU accesses have completed
 * @hwdev: device to sync
 * @sg: scatterlist to sync
 * @nents: number of entries in the scatterlist
 * @direction: DMA direction
 *
 * This routine is supposed to sync the DMA regions specified
 * by @sg into the 'coherence domain'.  We do not need to do anything
 * on our platform.
 */
void
sn_pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
{
	/* no-op on this platform */
}

void
sn_pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
{
	/* no-op on this platform */
}
485 * sn_dma_supported - test a DMA mask
486 * @hwdev: device to test
487 * @mask: DMA mask to test
489 * Return whether the given PCI device DMA address mask can be supported
490 * properly. For example, if your device can only drive the low 24-bits
491 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
492 * this function. Of course, SN only supports devices that have 32 or more
493 * address bits when using the PMU. We could theoretically support <32 bit
494 * cards using direct mapping, but we'll worry about that later--on the off
495 * chance that someone actually wants to use such a card.
498 sn_pci_dma_supported(struct pci_dev *hwdev, u64 mask)
500 if (mask < 0xffffffff)
506 * New generic DMA routines just wrap sn2 PCI routines until we
507 * support other bus types (if ever).
511 sn_dma_supported(struct device *dev, u64 mask)
513 BUG_ON(dev->bus != &pci_bus_type);
515 return sn_pci_dma_supported(to_pci_dev(dev), mask);
517 EXPORT_SYMBOL(sn_dma_supported);
520 sn_dma_set_mask(struct device *dev, u64 dma_mask)
522 BUG_ON(dev->bus != &pci_bus_type);
524 if (!sn_dma_supported(dev, dma_mask))
527 *dev->dma_mask = dma_mask;
530 EXPORT_SYMBOL(sn_dma_set_mask);
533 sn_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
536 BUG_ON(dev->bus != &pci_bus_type);
538 return sn_pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
540 EXPORT_SYMBOL(sn_dma_alloc_coherent);
543 sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
544 dma_addr_t dma_handle)
546 BUG_ON(dev->bus != &pci_bus_type);
548 sn_pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
550 EXPORT_SYMBOL(sn_dma_free_coherent);
553 sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
556 BUG_ON(dev->bus != &pci_bus_type);
558 return sn_pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
560 EXPORT_SYMBOL(sn_dma_map_single);
563 sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
566 BUG_ON(dev->bus != &pci_bus_type);
568 sn_pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
570 EXPORT_SYMBOL(sn_dma_unmap_single);
573 sn_dma_map_page(struct device *dev, struct page *page,
574 unsigned long offset, size_t size,
577 BUG_ON(dev->bus != &pci_bus_type);
579 return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
581 EXPORT_SYMBOL(sn_dma_map_page);
584 sn_dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
587 BUG_ON(dev->bus != &pci_bus_type);
589 pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
591 EXPORT_SYMBOL(sn_dma_unmap_page);
594 sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
597 BUG_ON(dev->bus != &pci_bus_type);
599 return sn_pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
601 EXPORT_SYMBOL(sn_dma_map_sg);
604 sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
607 BUG_ON(dev->bus != &pci_bus_type);
609 sn_pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
611 EXPORT_SYMBOL(sn_dma_unmap_sg);
614 sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
617 BUG_ON(dev->bus != &pci_bus_type);
619 sn_pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle, size, (int)direction);
621 EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
624 sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
627 BUG_ON(dev->bus != &pci_bus_type);
629 sn_pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle, size, (int)direction);
631 EXPORT_SYMBOL(sn_dma_sync_single_for_device);
634 sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
637 BUG_ON(dev->bus != &pci_bus_type);
639 sn_pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
641 EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
644 sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
647 BUG_ON(dev->bus != &pci_bus_type);
649 sn_pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
651 EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
654 sn_dma_mapping_error(dma_addr_t dma_addr)
657 * We can only run out of page mapping entries, so if there's
658 * an error, tell the caller to try again later.
665 EXPORT_SYMBOL(sn_dma_mapping_error);
/* Module exports for the PCI-flavoured entry points. */
EXPORT_SYMBOL(sn_pci_unmap_single);
EXPORT_SYMBOL(sn_pci_map_single);
EXPORT_SYMBOL(sn_pci_dma_sync_single_for_cpu);
EXPORT_SYMBOL(sn_pci_dma_sync_single_for_device);
EXPORT_SYMBOL(sn_pci_dma_sync_sg_for_cpu);
EXPORT_SYMBOL(sn_pci_dma_sync_sg_for_device);
EXPORT_SYMBOL(sn_pci_map_sg);
EXPORT_SYMBOL(sn_pci_unmap_sg);
EXPORT_SYMBOL(sn_pci_alloc_consistent);
EXPORT_SYMBOL(sn_pci_free_consistent);
EXPORT_SYMBOL(sn_pci_dma_supported);