#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>

#ifndef CONFIG_XEN

#define dma_alloc_coherent      platform_dma_alloc_coherent
#define dma_alloc_noncoherent   platform_dma_alloc_coherent     /* coherent mem. is cheap */
#define dma_free_coherent       platform_dma_free_coherent
#define dma_free_noncoherent    platform_dma_free_coherent
#define dma_map_single          platform_dma_map_single
#define dma_map_sg              platform_dma_map_sg
#define dma_unmap_single        platform_dma_unmap_single
#define dma_unmap_sg            platform_dma_unmap_sg
#define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu
#define dma_sync_sg_for_cpu     platform_dma_sync_sg_for_cpu
#define dma_sync_single_for_device platform_dma_sync_single_for_device
#define dma_sync_sg_for_device  platform_dma_sync_sg_for_device
#define dma_mapping_error       platform_dma_mapping_error
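
/*
 * All of the entry points above dispatch through the IA-64 machine
 * vector (<asm/machvec.h>), so each platform can supply its own,
 * possibly IOMMU-backed, implementation.
 *
 * Illustrative sketch only (the device and sizes are made up):
 * allocating a coherent descriptor ring through this interface.
 *
 *      dma_addr_t ring_bus;
 *      void *ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
 *                                      &ring_bus, GFP_KERNEL);
 *      if (ring == NULL)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_bus);
 */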

#else /* CONFIG_XEN */
/* Needed for arch/i386/kernel/swiotlb.c and arch/i386/kernel/pci-dma-xen.c */
#include <asm/hypervisor.h>
/* Needed for arch/i386/kernel/swiotlb.c */
#include <asm-i386/mach-xen/asm/swiotlb.h>

int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
               enum dma_data_direction direction);
void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
                  enum dma_data_direction direction);
int dma_supported(struct device *dev, u64 mask);
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                       dma_addr_t dma_handle);
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
                          enum dma_data_direction direction);
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                      enum dma_data_direction direction);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                             size_t size, enum dma_data_direction direction);
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                                size_t size,
                                enum dma_data_direction direction);
int dma_mapping_error(dma_addr_t dma_addr);
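
/*
 * Illustrative sketch only (buf/len are made-up names): the usual
 * streaming-mapping pattern with the Xen versions declared above.
 *
 *      dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(bus))
 *              return -ENOMEM;
 *      ... hand "bus" to the device and wait for completion ...
 *      dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */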

/*
 * flush_write_buffers() is a no-op here; the name is kept for
 * compatibility with the shared i386/Xen swiotlb code.  "swiotlb" is
 * the flag exported by that code saying whether bounce buffering is
 * in use.
 */
#define flush_write_buffers()   do { } while (0)
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_sg_for_cpu(dev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
        flush_write_buffers();
}
#endif /* CONFIG_XEN */

#define dma_map_page(dev, pg, off, size, dir)                           \
        dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)                        \
        dma_unmap_single(dev, dma_addr, size, dir)
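
/*
 * Going through page_address() is safe here because IA-64 has no
 * highmem: every page is covered by the kernel's identity mapping.
 */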

/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */

/* Note: the offset argument is accepted but unused by these macros. */
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)       \
        dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)    \
        dma_sync_single_for_device(dev, dma_handle, size, dir)

#ifndef CONFIG_XEN
#define dma_supported           platform_dma_supported
#endif

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;
        *dev->dma_mask = mask;
        return 0;
}
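
/*
 * Illustrative sketch only: a driver would typically try a wide mask
 * first and fall back to a narrower one.
 *
 *      if (dma_set_mask(dev, DMA_64BIT_MASK) &&
 *          dma_set_mask(dev, DMA_32BIT_MASK))
 *              return -EIO;
 */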

extern int dma_get_cache_alignment(void);

static inline void
dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
{
        /*
         * IA-64 is cache-coherent, so this is mostly a no-op.  However,
         * we do need to ensure that dma_cache_sync() enforces order,
         * hence the mb().
         */
        mb();
}

#define dma_is_consistent(dma_handle)   (1)     /* all we do is coherent memory... */

#ifdef CONFIG_XEN
/* Needed for arch/i386/kernel/swiotlb.c */
void contiguous_bitmap_init(unsigned long end_pfn);

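/*
 * Presumably used by the Xen swiotlb code to decide whether an address
 * must be bounced: the check is simply whether the address fits within
 * the device's DMA mask.
 */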
static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr)
{
        /* If the device has a mask, use it; otherwise default to 64 bits. */
        dma_addr_t mask = DMA_64BIT_MASK;

        if (hwdev && hwdev->dma_mask)
                mask = *hwdev->dma_mask;
        return (addr & ~mask) != 0;
}
#else
#define contiguous_bitmap_init(end_pfn) ((void)end_pfn)
#endif

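/*
 * Under Xen, pseudo-physically contiguous pages need not be contiguous
 * in machine memory.  This returns true when a buffer crosses a page
 * boundary and contiguous_bitmap does not record the underlying frames
 * as contiguous, i.e. when the range is unsafe as a single DMA region.
 */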
static inline int
range_straddles_page_boundary(void *p, size_t size)
{
        extern unsigned long *contiguous_bitmap;
        return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
                !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
}

#endif /* _ASM_IA64_DMA_MAPPING_H */