1 #ifndef _ASM_IA64_DMA_MAPPING_H
2 #define _ASM_IA64_DMA_MAPPING_H
/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
8 #include <asm/machvec.h>
/*
 * Native (non-Xen) configuration: the generic DMA API names resolve
 * directly to the per-platform machine-vector hooks (platform_*)
 * pulled in via <asm/machvec.h> above.
 */
#define dma_alloc_coherent platform_dma_alloc_coherent
#define dma_alloc_noncoherent platform_dma_alloc_coherent /* coherent mem. is cheap */
#define dma_free_coherent platform_dma_free_coherent
#define dma_free_noncoherent platform_dma_free_coherent
#define dma_map_single platform_dma_map_single
#define dma_map_sg platform_dma_map_sg
#define dma_unmap_single platform_dma_unmap_single
#define dma_unmap_sg platform_dma_unmap_sg
#define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu
#define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu
#define dma_sync_single_for_device platform_dma_sync_single_for_device
#define dma_sync_sg_for_device platform_dma_sync_sg_for_device
#define dma_mapping_error platform_dma_mapping_error
#else /* CONFIG_XEN */
/*
 * Xen configuration: the DMA API entry points are real out-of-line
 * functions (backed by the swiotlb code shared with i386) instead of
 * machine-vector macros.  Semantics follow Documentation/DMA-API.txt.
 */
/* Needed for arch/i386/kernel/swiotlb.c and arch/i386/kernel/pci-dma-xen.c */
#include <asm/hypervisor.h>
/* Needed for arch/i386/kernel/swiotlb.c */
#include <asm-i386/mach-xen/asm/swiotlb.h>

/* Map/unmap a scatter-gather list for DMA. */
int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	       enum dma_data_direction direction);
void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
		  enum dma_data_direction direction);
/* Nonzero if the device can DMA to/from memory covered by @mask. */
int dma_supported(struct device *dev, u64 mask);
/* Allocate/free a coherent buffer; *dma_handle receives its bus address. */
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		       dma_addr_t dma_handle);
/* Map/unmap a single contiguous kernel buffer for DMA. */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
			  enum dma_data_direction direction);
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction);
/* Hand a mapped region back to the CPU, or to the device, respectively. */
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction);
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				enum dma_data_direction direction);
/* Nonzero if @dma_addr is the implementation's error sentinel. */
int dma_mapping_error(dma_addr_t dma_addr);
/*
 * No-op here; provided because the i386-derived swiotlb sync helpers
 * below call it unconditionally.
 */
#define flush_write_buffers() do { } while (0)
/*
 * Make the CPU's view of a scatter-gather list consistent after device
 * DMA.  Delegates to the swiotlb implementation; flush_write_buffers()
 * expands to a no-op in this header.
 */
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
	swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
	flush_write_buffers();
/*
 * Hand a scatter-gather list back to the device before it resumes DMA.
 * Delegates to the swiotlb implementation; flush_write_buffers()
 * expands to a no-op in this header.
 */
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
	swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
	flush_write_buffers();
71 #endif /* CONFIG_XEN */
/*
 * Page-based map/unmap, built on the single-buffer calls via the
 * page's kernel virtual address (page_address() is always valid here;
 * ia64 has no highmem).
 */
#define dma_map_page(dev, pg, off, size, dir) \
	dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir) \
	dma_unmap_single(dev, dma_addr, size, dir)
/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */
/*
 * Partial-range syncs forward to the whole-buffer sync hooks.
 * NOTE(review): the 'offset' argument is dropped, so the synced span is
 * [dma_handle, dma_handle + size) rather than the requested
 * [dma_handle + offset, dma_handle + offset + size) window — confirm
 * this is intentional (harmless on a fully cache-coherent platform).
 */
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
	dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
	dma_sync_single_for_device(dev, dma_handle, size, dir)

/* Capability query routed to the machine-vector hook. */
#define dma_supported platform_dma_supported
/*
 * Validate @mask with dma_supported() and, on success, install it as
 * the device's DMA addressing mask.
 */
dma_set_mask (struct device *dev, u64 mask)
	if (!dev->dma_mask || !dma_supported(dev, mask))
	*dev->dma_mask = mask;
/* Minimum buffer alignment for safe DMA; defined out of line. */
extern int dma_get_cache_alignment(void);

dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to
	 * ensure that dma_cache_sync() enforces order, hence the mb().
	 */

/* Coherent memory is all this implementation hands out. */
#define dma_is_consistent(dma_handle) (1) /* all we do is coherent memory... */
/* Required by arch/i386/kernel/swiotlb.o. */
void contiguous_bitmap_init(unsigned long end_pfn);

/*
 * Nonzero when @addr is not reachable through @hwdev's DMA mask
 * (a device without a mask is assumed to address the full 64 bits).
 */
address_needs_mapping(struct device *hwdev, dma_addr_t addr)
	dma_addr_t mask = DMA_64BIT_MASK;
	/* If the device has a mask, use it, otherwise default to 64 bits */
	if (hwdev && hwdev->dma_mask)
		mask = *hwdev->dma_mask;
	return (addr & ~mask) != 0;
/* No-op stub for configurations that do not use the contiguity bitmap. */
#define contiguous_bitmap_init(end_pfn) ((void)end_pfn)
/*
 * Nonzero when the buffer [p, p + size) crosses a page boundary AND the
 * page holding @p has its bit clear in contiguous_bitmap.
 * NOTE(review): a set bit appears to mean the underlying machine frames
 * are physically contiguous (Xen); only the first page's bit is tested
 * for the whole range — confirm against the bitmap's definition.
 */
range_straddles_page_boundary(void *p, size_t size)
	extern unsigned long *contiguous_bitmap;
	return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
		!test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
140 #endif /* _ASM_IA64_DMA_MAPPING_H */