1 #ifndef _ASM_IA64_DMA_MAPPING_H
2 #define _ASM_IA64_DMA_MAPPING_H
/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 * David Mosberger-Tang <davidm@hpl.hp.com>
 */
8 #include <asm/machvec.h>
/*
 * Non-Xen configuration: every generic DMA API entry point is an alias
 * for the corresponding machine-vector operation (platform_dma_*),
 * which <asm/machvec.h> (included above) resolves for the specific
 * ia64 platform — presumably at boot time; confirm against machvec.h.
 */
11 #define dma_alloc_coherent platform_dma_alloc_coherent
12 #define dma_alloc_noncoherent platform_dma_alloc_coherent /* coherent mem. is cheap */
13 #define dma_free_coherent platform_dma_free_coherent
14 #define dma_free_noncoherent platform_dma_free_coherent
15 #define dma_map_single platform_dma_map_single
16 #define dma_map_sg platform_dma_map_sg
17 #define dma_unmap_single platform_dma_unmap_single
18 #define dma_unmap_sg platform_dma_unmap_sg
19 #define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu
20 #define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu
21 #define dma_sync_single_for_device platform_dma_sync_single_for_device
22 #define dma_sync_sg_for_device platform_dma_sync_sg_for_device
23 #define dma_mapping_error platform_dma_mapping_error
25 #else /* CONFIG_XEN */
26 /* Needed for arch/i386/kernel/swiotlb.c and arch/i386/kernel/pci-dma-xen.c */
27 #include <asm/hypervisor.h>
28 /* Needed for arch/i386/kernel/swiotlb.c */
29 #include <asm-i386/mach-xen/asm/swiotlb.h>
/*
 * CONFIG_XEN configuration: instead of machine-vector aliases, the DMA
 * API is implemented out of line (swiotlb-backed, per the includes and
 * comments above), so only prototypes appear here.
 */
31 int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
32 enum dma_data_direction direction);
33 void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
34 enum dma_data_direction direction);
35 int dma_supported(struct device *dev, u64 mask);
36 void *dma_alloc_coherent(struct device *dev, size_t size,
37 dma_addr_t *dma_handle, gfp_t gfp);
38 void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
39 dma_addr_t dma_handle);
40 dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
41 enum dma_data_direction direction);
42 void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
43 enum dma_data_direction direction);
44 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
45 size_t size, enum dma_data_direction direction);
46 void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
48 enum dma_data_direction direction);
49 int dma_mapping_error(dma_addr_t dma_addr);
/*
 * Expands to an empty statement: lets code shared with i386 (which
 * calls flush_write_buffers()) build here without doing any work.
 */
51 #define flush_write_buffers() do { } while (0)
/*
 * Hand a mapped scatterlist back to the CPU: delegate the sync to the
 * swiotlb implementation, then flush_write_buffers() (an empty
 * statement on this architecture).
 * NOTE(review): the function header and braces are elided in this
 * listing fragment.
 */
53 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
54 enum dma_data_direction direction)
57 swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
58 flush_write_buffers();
/*
 * Hand a mapped scatterlist to the device: delegate the sync to the
 * swiotlb implementation, then flush_write_buffers() (an empty
 * statement on this architecture).
 * NOTE(review): the function header and braces are elided in this
 * listing fragment.
 */
62 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
63 enum dma_data_direction direction)
66 swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
67 flush_write_buffers();
69 #endif /* CONFIG_XEN */
/*
 * Page-based map/unmap, built on the single-buffer primitives.
 * Uses page_address(), i.e. assumes the page always has a kernel
 * virtual address (no highmem) — TODO confirm for this arch.
 * NOTE(review): the 'dev', 'pg' and 'dma_addr' macro parameters are
 * expanded unparenthesized; safe only for simple arguments.
 */
71 #define dma_map_page(dev, pg, off, size, dir) \
72 dma_map_single(dev, page_address(pg) + (off), (size), (dir))
73 #define dma_unmap_page(dev, dma_addr, size, dir) \
74 dma_unmap_single(dev, dma_addr, size, dir)
/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */
/*
 * Partial-buffer sync helpers ("Advanced DMA API").
 * Sync the sub-range [offset, offset + size) of a mapping by offsetting
 * the bus address before delegating to the full-buffer primitives.
 *
 * Fix: the previous expansion dropped 'offset' entirely, so it synced
 * [dma_handle, dma_handle + size) — the start of the mapping — rather
 * than the sub-range the caller asked for.  All arguments are now also
 * parenthesized against operator-precedence surprises.
 */
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
	dma_sync_single_for_cpu(dev, (dma_handle) + (offset), (size), (dir))
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
	dma_sync_single_for_device(dev, (dma_handle) + (offset), (size), (dir))
/* Mask-capability query, answered by the machine-vector implementation. */
87 #define dma_supported platform_dma_supported
/*
 * Install a new DMA addressing mask on the device, but only after
 * checking that the device has a mask pointer and that the platform
 * supports the requested mask.
 * NOTE(review): the function header, braces and the return statements
 * (failure vs. success paths) are elided in this listing fragment.
 */
91 dma_set_mask (struct device *dev, u64 mask)
93 if (!dev->dma_mask || !dma_supported(dev, mask))
95 *dev->dma_mask = mask;
/* Alignment for DMA-safe buffers; implemented out of line. */
99 extern int dma_get_cache_alignment(void);
/*
 * dma_cache_sync(): per the original comment kept below, on this
 * cache-coherent architecture the call only has to enforce ordering.
 * NOTE(review): the function header, braces and body (the mb() the
 * comment refers to) are elided in this listing fragment.
 */
102 dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
105 * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to
106 * ensure that dma_cache_sync() enforces order, hence the mb().
111 #define dma_is_consistent(dma_handle) (1) /* all we do is coherent memory... */
114 /* arch/i386/kernel/swiotlb.o requires */
/*
 * Initialize the bitmap of machine-contiguous page ranges — presumably
 * only in the Xen configuration; a no-op stub is defined further down
 * for the other case.  TODO confirm the surrounding (elided) #ifdef.
 */
115 void contiguous_bitmap_init(unsigned long end_pfn);
/*
 * Return non-zero when 'addr' has bits set above the device's DMA
 * mask, i.e. the device cannot reach that address directly (used by
 * swiotlb to decide on bounce buffering — TODO confirm caller).
 * NOTE(review): the function header and braces are elided in this
 * listing fragment.
 */
118 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
120 dma_addr_t mask = DMA_64BIT_MASK;
121 /* If the device has a mask, use it, otherwise default to 64 bits */
122 if (hwdev && hwdev->dma_mask)
123 mask = *hwdev->dma_mask;
124 return (addr & ~mask) != 0;
/*
 * Stub for builds without the contiguous bitmap: evaluate the argument
 * once (avoids unused-variable warnings) and do nothing.
 */
127 #define contiguous_bitmap_init(end_pfn) ((void)end_pfn)
/*
 * Return non-zero when the buffer [p, p + size) crosses a page
 * boundary AND the contiguous_bitmap does not mark p's page frame —
 * presumably meaning the pages are not machine-contiguous, so the
 * range cannot be DMA'd as one run; confirm bitmap semantics against
 * its initializer.
 * NOTE(review): the function header and braces are elided in this
 * listing fragment.
 */
131 range_straddles_page_boundary(void *p, size_t size)
133 extern unsigned long *contiguous_bitmap;
134 return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
135 !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
138 #endif /* _ASM_IA64_DMA_MAPPING_H */