/* linux-2.6.git: include/asm-x86_64/mach-xen/asm/dma-mapping.h */
#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/config.h>

#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

struct dma_mapping_ops {
        int             (*mapping_error)(dma_addr_t dma_addr);
        void*           (*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void            (*free_coherent)(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
        dma_addr_t      (*map_single)(struct device *hwdev, void *ptr,
                                size_t size, int direction);
        /* like map_single, but doesn't check the device mask */
        dma_addr_t      (*map_simple)(struct device *hwdev, char *ptr,
                                size_t size, int direction);
        void            (*unmap_single)(struct device *dev, dma_addr_t addr,
                                size_t size, int direction);
        void            (*sync_single_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_range_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_single_range_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        void            (*sync_sg_for_device)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                                int nents, int direction);
        void            (*unmap_sg)(struct device *hwdev,
                                struct scatterlist *sg, int nents,
                                int direction);
        int             (*dma_supported)(struct device *hwdev, u64 mask);
        int             is_phys;
};

extern dma_addr_t bad_dma_address;
extern struct dma_mapping_ops* dma_ops;
extern int iommu_merge;

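/*
 * A backend (nommu, GART IOMMU, swiotlb, ...) hooks itself up by filling in
 * a dma_mapping_ops table and pointing dma_ops at it.  A minimal sketch only,
 * with hypothetical function names (example_*) that are not defined anywhere
 * in this header:
 *
 *      static struct dma_mapping_ops example_dma_ops = {
 *              .mapping_error  = example_mapping_error,
 *              .map_single     = example_map_single,
 *              .unmap_single   = example_unmap_single,
 *              .map_sg         = example_map_sg,
 *              .unmap_sg       = example_unmap_sg,
 *              .dma_supported  = example_dma_supported,
 *              .is_phys        = 1,
 *      };
 *
 *      dma_ops = &example_dma_ops;
 */
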
#if 0
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        if (dma_ops->mapping_error)
                return dma_ops->mapping_error(dma_addr);

        return (dma_addr == bad_dma_address);
}

extern void *dma_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                              dma_addr_t dma_handle);

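/*
 * Coherent mappings are allocated and freed as a pair.  Illustrative sketch
 * only ("dev" and "size" are hypothetical driver variables):
 *
 *      dma_addr_t dma_handle;
 *      void *cpu_addr;
 *
 *      cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *      if (!cpu_addr)
 *              return -ENOMEM;
 *      ... use cpu_addr from the CPU, hand dma_handle to the device ...
 *      dma_free_coherent(dev, size, cpu_addr, dma_handle);
 */
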
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        return dma_ops->map_single(hwdev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        dma_ops->unmap_single(dev, addr, size, direction);
}

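/*
 * Typical streaming usage: map, check for failure, let the device do the
 * transfer, then unmap.  Illustrative sketch only ("dev", "buf" and "len"
 * are hypothetical driver variables; DMA_TO_DEVICE comes from
 * <linux/dma-mapping.h>):
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(handle))
 *              return -EIO;
 *      ... program the device with "handle" and wait for completion ...
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
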
#define dma_map_page(dev,page,offset,size,dir) \
        dma_map_single((dev), page_address(page)+(offset), (size), (dir))

#define dma_unmap_page dma_unmap_single

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        if (dma_ops->sync_single_for_cpu)
                dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
                                             direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        if (dma_ops->sync_single_for_device)
                dma_ops->sync_single_for_device(hwdev, dma_handle, size,
                                                direction);
        flush_write_buffers();
}

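/*
 * When the CPU needs to look at a buffer that is still mapped for streaming
 * DMA, ownership is bounced back and forth with the sync calls.  Illustrative
 * sketch only ("dev", "handle" and "len" are hypothetical driver variables):
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      ... CPU inspects or modifies the buffer ...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *      ... the device may touch the buffer again ...
 */
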
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        if (dma_ops->sync_single_range_for_cpu) {
                dma_ops->sync_single_range_for_cpu(hwdev, dma_handle,
                                                   offset, size, direction);
        }

        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size, int direction)
{
        if (dma_ops->sync_single_range_for_device)
                dma_ops->sync_single_range_for_device(hwdev, dma_handle,
                                                      offset, size, direction);

        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        if (dma_ops->sync_sg_for_cpu)
                dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        if (dma_ops->sync_sg_for_device) {
                dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
        }

        flush_write_buffers();
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
{
        return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        dma_ops->unmap_sg(hwdev, sg, nents, direction);
}

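/*
 * Scatter-gather entries may be merged by the IOMMU, so the device is
 * programmed with the count returned by dma_map_sg(), while dma_unmap_sg()
 * takes the original nents.  Illustrative sketch only ("dev", "sglist" and
 * "nents" are hypothetical driver variables):
 *
 *      int i, count;
 *
 *      count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *      if (!count)
 *              return -EIO;
 *      for (i = 0; i < count; i++)
 *              ... feed sg_dma_address(&sglist[i]) and sg_dma_len(&sglist[i])
 *                  to the device ...
 *      dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */
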
extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
        return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(h) 1

extern int dma_set_mask(struct device *dev, u64 mask);

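/*
 * A driver negotiates its addressing capability before mapping anything; a
 * nonzero return from dma_set_mask() means the mask is not supported.
 * Illustrative sketch only ("dev" is a hypothetical driver variable;
 * DMA_32BIT_MASK comes from <linux/dma-mapping.h>):
 *
 *      if (dma_set_mask(dev, DMA_32BIT_MASK))
 *              return -ENODEV;
 */
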
static inline void
dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
{
        flush_write_buffers();
}

extern struct device fallback_dev;
extern int panic_on_overflow;
#endif

#endif /* _X8664_DMA_MAPPING_H */

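/*
 * The generic definitions above are compiled out (#if 0); the Xen build takes
 * its actual dma-mapping implementation from the shared i386 mach-xen header
 * included below.
 */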
#include <asm-i386/mach-xen/asm/dma-mapping.h>