This commit was manufactured by cvs2svn to create branch 'vserver'.
[linux-2.6.git] arch/i386/kernel/pci-dma-xen.c
/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/version.h>
#include <asm/io.h>
#include <xen/balloon.h>
#include <asm/tlbflush.h>
#include <asm-i386/mach-xen/asm/swiotlb.h>
#include <asm/bug.h>

#ifdef __x86_64__
int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

__init int iommu_setup(char *p)
{
        return 1;
}
#endif

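/*
 * Per-device coherent memory pool, set up by dma_declare_coherent_memory().
 * dma_alloc_coherent() satisfies requests from this pool (allocation state
 * tracked in the page-granular bitmap) before falling back to the page
 * allocator.
 */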
struct dma_coherent_mem {
        void            *virt_base;
        u32             device_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
};

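/*
 * Without a software IOMMU there is no way to remap a buffer that the
 * device cannot address, so treat such a request as fatal and point the
 * user at swiotlb=force.
 */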
#define IOMMU_BUG_ON(test)                              \
do {                                                    \
        if (unlikely(test)) {                           \
                printk(KERN_ALERT "Fatal DMA error! "   \
                       "Please use 'swiotlb=force'\n"); \
                BUG();                                  \
        }                                               \
} while (0)

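/*
 * Map a scatterlist for DMA.  With swiotlb active the mapping (and any
 * bounce buffering) is delegated to it; otherwise each entry is translated
 * directly to its machine (bus) address via page_to_bus().
 */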
int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i, rc;

        if (direction == DMA_NONE)
                BUG();
        WARN_ON(nents == 0 || sg[0].length == 0);

        if (swiotlb) {
                rc = swiotlb_map_sg(hwdev, sg, nents, direction);
        } else {
                for (i = 0; i < nents; i++) {
                        BUG_ON(!sg[i].page);
                        sg[i].dma_address =
                                page_to_bus(sg[i].page) + sg[i].offset;
                        sg[i].dma_length  = sg[i].length;
                        IOMMU_BUG_ON(address_needs_mapping(
                                hwdev, sg[i].dma_address));
                }
                rc = nents;
        }

        flush_write_buffers();
        return rc;
}
EXPORT_SYMBOL(dma_map_sg);

void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        if (swiotlb)
                swiotlb_unmap_sg(hwdev, sg, nents, direction);
}
EXPORT_SYMBOL(dma_unmap_sg);

/*
 * XXX This file is also used by xenLinux/ia64.
 * "defined(__i386__) || defined(__x86_64__)" means "!defined(__ia64__)".
 * This #if workaround should be removed once this file is merged back into
 * i386's pci-dma or is moved to drivers/xen/core.
 */
#if defined(__i386__) || defined(__x86_64__)
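/*
 * Map a single page for DMA: bounce through swiotlb when it is enabled,
 * otherwise use the page's machine (bus) address directly.
 */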
dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        dma_addr_t dma_addr;

        BUG_ON(direction == DMA_NONE);

        if (swiotlb) {
                dma_addr = swiotlb_map_page(
                        dev, page, offset, size, direction);
        } else {
                dma_addr = page_to_bus(page) + offset;
                IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
        }

        return dma_addr;
}
EXPORT_SYMBOL(dma_map_page);

void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        if (swiotlb)
                swiotlb_unmap_page(dev, dma_address, size, direction);
}
EXPORT_SYMBOL(dma_unmap_page);
#endif /* defined(__i386__) || defined(__x86_64__) */

int
dma_mapping_error(dma_addr_t dma_addr)
{
        if (swiotlb)
                return swiotlb_dma_mapping_error(dma_addr);
        return 0;
}
EXPORT_SYMBOL(dma_mapping_error);

int
dma_supported(struct device *dev, u64 mask)
{
        if (swiotlb)
                return swiotlb_dma_supported(dev, mask);
        /*
         * By default we'll BUG when an infeasible DMA is requested, and
         * request swiotlb=force (see IOMMU_BUG_ON).
         */
        return 1;
}
EXPORT_SYMBOL(dma_supported);

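/*
 * Allocate a DMA-coherent buffer.  A device-local pool declared via
 * dma_declare_coherent_memory() is tried first; otherwise pages come from
 * the page allocator and are exchanged with Xen for a machine-contiguous
 * region below 2^31 via xen_create_contiguous_region().
 */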
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        unsigned int order = get_order(size);
        unsigned long vstart;
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                   order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(ret, 0, size);
                        return ret;
                }
                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
                        return NULL;
        }

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;

        vstart = __get_free_pages(gfp, order);
        ret = (void *)vstart;

        if (ret != NULL) {
                /* NB. Hardcode 31 address bits for now: aacraid limitation. */
                if (xen_create_contiguous_region(vstart, order, 31) != 0) {
                        free_pages(vstart, order);
                        return NULL;
                }
                memset(ret, 0, size);
                *dma_handle = virt_to_bus(ret);
        }
        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

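/*
 * Free a buffer from dma_alloc_coherent(): either release it back into the
 * device's coherent pool, or hand the machine-contiguous region back to Xen
 * and free the pages.
 */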
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem && vaddr >= mem->virt_base &&
            vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
        } else {
                xen_destroy_contiguous_region((unsigned long)vaddr, order);
                free_pages((unsigned long)vaddr, order);
        }
}
EXPORT_SYMBOL(dma_free_coherent);

#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
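/*
 * Declare a device-private region of bus address space as coherent memory.
 * The region is ioremap()ed and managed with a page-granular bitmap; later
 * dma_alloc_coherent() calls for this device are served from it.
 */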
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
        dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;
        memset(dev->dma_mem->bitmap, 0, bitmap_size);

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        /* Drop the half-initialised descriptor so a later call can retry. */
        kfree(dev->dma_mem);
        dev->dma_mem = NULL;
 out:
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

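/*
 * Reserve a range of the declared coherent region so dma_alloc_coherent()
 * will not hand it out; returns the kernel virtual address of the range.
 */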
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int pos, err;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        /* get_order() expects a size in bytes, not a page count. */
        err = bitmap_allocate_region(mem->bitmap, pos,
                                     get_order(pages << PAGE_SHIFT));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */

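/*
 * Map a single virtually contiguous buffer.  Without swiotlb the buffer
 * must not straddle a page boundary, since contiguous pseudo-physical
 * pages are not necessarily contiguous in machine memory under Xen.
 */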
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        dma_addr_t dma;

        if (direction == DMA_NONE)
                BUG();
        WARN_ON(size == 0);

        if (swiotlb) {
                dma = swiotlb_map_single(dev, ptr, size, direction);
        } else {
                dma = virt_to_bus(ptr);
                IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
                IOMMU_BUG_ON(address_needs_mapping(dev, dma));
        }

        flush_write_buffers();
        return dma;
}
EXPORT_SYMBOL(dma_map_single);

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        if (direction == DMA_NONE)
                BUG();
        if (swiotlb)
                swiotlb_unmap_single(dev, dma_addr, size, direction);
}
EXPORT_SYMBOL(dma_unmap_single);

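/*
 * The sync operations only have work to do when swiotlb bounce buffers are
 * in use; otherwise the device already sees the same memory as the CPU.
 */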
void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_device);