Merge to Fedora kernel-2.6.18-1.2224_FC5 patched with stable patch-2.6.18.1-vs2.0...
[linux-2.6.git] include/asm-i386/mach-xen/asm/dma-mapping.h
#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/scatterlist.h>
#include <asm/swiotlb.h>

static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr)
{
        dma_addr_t mask = 0xffffffff;
        /* If the device has a mask, use it, otherwise default to 32 bits */
        if (hwdev && hwdev->dma_mask)
                mask = *hwdev->dma_mask;
        return (addr & ~mask) != 0;
}
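
/*
 * Illustrative note, not part of the original header: a nonzero result
 * here is what forces a bounce through swiotlb.  With a hypothetical
 * ISA-style 24-bit mask (*hwdev->dma_mask == 0x00ffffff):
 *
 *      address_needs_mapping(hwdev, 0x00fff000) == 0  (reachable)
 *      address_needs_mapping(hwdev, 0x01000000) == 1  (out of range)
 */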

static inline int
range_straddles_page_boundary(void *p, size_t size)
{
        extern unsigned long *contiguous_bitmap;
        return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
                !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
}
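
/*
 * Illustrative note, not part of the original header: under Xen a
 * pseudo-physically contiguous buffer need not be machine-contiguous,
 * so crossing a page boundary is only safe when contiguous_bitmap says
 * the underlying frames are.  With 4 KiB pages, a buffer at offset
 * 0xf00 within its page with size 0x200 gives 0xf00 + 0x200 = 0x1100 >
 * PAGE_SIZE, so it straddles a boundary and may need special handling.
 */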

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle);
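
/*
 * Usage sketch, not part of the original header; RING_BYTES and the
 * surrounding driver are hypothetical.  Coherent memory is visible to
 * the CPU and the device at all times, with no explicit syncs:
 *
 *      dma_addr_t ring_dma;
 *      void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
 *                                      GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      (program ring_dma into the device; CPU accesses go through ring)
 *      dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */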

extern dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction);

extern void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction);
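
/*
 * Usage sketch, not part of the original header; 'buf' and 'len' are
 * hypothetical.  A streaming mapping belongs to the device from map to
 * unmap, and the direction declares who writes the memory:
 *
 *      dma_addr_t busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(busaddr))
 *              return -EIO;
 *      (start the transfer, wait for the device to finish)
 *      dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */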

extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
                      int nents, enum dma_data_direction direction);
extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
                         int nents, enum dma_data_direction direction);
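
/*
 * Usage sketch, not part of the original header; the buffers are
 * hypothetical.  dma_map_sg() returns the number of DMA segments
 * actually used (possibly fewer than nents after merging), while
 * dma_unmap_sg() must be passed the original nents, not that count:
 *
 *      struct scatterlist sg[2];
 *      memset(sg, 0, sizeof(sg));
 *      sg_set_buf(&sg[0], buf0, len0);
 *      sg_set_buf(&sg[1], buf1, len1);
 *      int count = dma_map_sg(dev, sg, 2, DMA_FROM_DEVICE);
 *      (program 'count' segments via sg_dma_address()/sg_dma_len())
 *      dma_unmap_sg(dev, sg, 2, DMA_FROM_DEVICE);
 */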

extern dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction);

extern void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction);
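
/*
 * Usage sketch, not part of the original header: the page-based variant
 * takes a struct page plus offset, so highmem pages can be mapped
 * without a kernel virtual address:
 *
 *      dma_addr_t busaddr = dma_map_page(dev, page, 0, PAGE_SIZE,
 *                                        DMA_FROM_DEVICE);
 *      (let the device fill the page)
 *      dma_unmap_page(dev, busaddr, PAGE_SIZE, DMA_FROM_DEVICE);
 */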

extern void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction);

extern void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction);

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction direction)
{
        dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction direction)
{
        dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}
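
/*
 * Usage sketch, not part of the original header; 'busaddr', 'offset'
 * and 'hdr_len' are hypothetical.  To inspect part of a live streaming
 * mapping (say a packet header), the CPU must take ownership back and
 * then return it before the device touches the buffer again:
 *
 *      dma_sync_single_range_for_cpu(dev, busaddr, offset, hdr_len,
 *                                    DMA_FROM_DEVICE);
 *      (CPU may now read hdr_len bytes at that offset)
 *      dma_sync_single_range_for_device(dev, busaddr, offset, hdr_len,
 *                                       DMA_FROM_DEVICE);
 */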

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                    enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_sg_for_cpu(dev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                       enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
        flush_write_buffers();
}
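
/*
 * Usage sketch, not part of the original header: the scatterlist sync
 * pair follows the same ownership rule as the single-buffer calls,
 * bouncing through swiotlb only when it is active:
 *
 *      dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);
 *      (CPU reads the received data described by sg)
 *      dma_sync_sg_for_device(dev, sg, nents, DMA_FROM_DEVICE);
 */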

extern int
dma_mapping_error(dma_addr_t dma_addr);

extern int
dma_supported(struct device *dev, u64 mask);

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
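
/*
 * Usage sketch, not part of the original header: probe code typically
 * asks for the widest mask the device supports and falls back, failing
 * the probe if even 32 bits cannot be satisfied:
 *
 *      if (dma_set_mask(dev, DMA_64BIT_MASK) &&
 *          dma_set_mask(dev, DMA_32BIT_MASK))
 *              return -EIO;
 */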

static inline int
dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return (1 << INTERNODE_CACHE_SHIFT);
}
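
/*
 * Usage sketch, not part of the original header; 'obj_size' is
 * hypothetical.  Rounding per-object sizes up to this value keeps two
 * DMA buffers from ever sharing a cache line:
 *
 *      size_t slot = ALIGN(obj_size, dma_get_cache_alignment());
 */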

#define dma_is_consistent(d)    (1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        flush_write_buffers();
}

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size);

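/*
 * Usage sketch, not part of the original header; the addresses and size
 * are hypothetical.  A device with dedicated memory (e.g. on-card RAM)
 * can register it as the pool that dma_alloc_coherent() carves from;
 * assuming the 2.6-era convention that a nonzero return means the
 * request was honored:
 *
 *      if (!dma_declare_coherent_memory(dev, bus_addr, device_addr,
 *                                       pool_size, DMA_MEMORY_MAP))
 *              return -ENOMEM;
 *      (dma_alloc_coherent() now allocates from the declared pool)
 *      dma_release_declared_memory(dev);
 */
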
#endif