arch/i386/kernel/pci-dma-xen.c
/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/version.h>
#include <asm/io.h>
#include <xen/balloon.h>
#include <asm/tlbflush.h>
#include <asm-i386/mach-xen/asm/swiotlb.h>
#include <asm/bug.h>
#ifdef CONFIG_CALGARY_IOMMU
#include <asm/calgary.h>
#endif

#ifdef __x86_64__
int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

int iommu_sac_force __read_mostly = 0;
EXPORT_SYMBOL(iommu_sac_force);

int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

void __init pci_iommu_alloc(void)
{
        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
#ifdef CONFIG_IOMMU
        iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
        detect_calgary();
#endif

#ifdef CONFIG_SWIOTLB
        pci_swiotlb_init();
#endif
}

__init int iommu_setup(char *p)
{
        return 1;
}
#endif

struct dma_coherent_mem {
        void            *virt_base;
        u32             device_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
};

#define IOMMU_BUG_ON(test)                              \
do {                                                    \
        if (unlikely(test)) {                           \
                printk(KERN_ALERT "Fatal DMA error! "   \
                       "Please use 'swiotlb=force'\n"); \
                BUG();                                  \
        }                                               \
} while (0)

int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i, rc;

        BUG_ON(direction == DMA_NONE);
        WARN_ON(nents == 0 || sg[0].length == 0);

        if (swiotlb) {
                rc = swiotlb_map_sg(hwdev, sg, nents, direction);
        } else {
                for (i = 0; i < nents; i++) {
                        BUG_ON(!sg[i].page);
                        sg[i].dma_address =
                                page_to_bus(sg[i].page) + sg[i].offset;
                        sg[i].dma_length  = sg[i].length;
                        IOMMU_BUG_ON(address_needs_mapping(
                                hwdev, sg[i].dma_address));
                }
                rc = nents;
        }

        flush_write_buffers();
        return rc;
}
EXPORT_SYMBOL(dma_map_sg);

void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        if (swiotlb)
                swiotlb_unmap_sg(hwdev, sg, nents, direction);
}
EXPORT_SYMBOL(dma_unmap_sg);

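/*
 * Illustrative sketch (not part of the DMA implementation above): how a
 * driver typically uses dma_map_sg()/dma_unmap_sg().  The device pointer,
 * the pre-filled scatterlist and the example_hw_queue() helper are
 * hypothetical placeholders.
 */
#if 0
static int example_map_sg(struct device *dev, struct scatterlist *sg,
                          int nents)
{
        int i, count;

        /* Hand the whole scatterlist to the DMA layer in one call. */
        count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
        if (count == 0)
                return -EIO;

        /* Program the device with the bus addresses chosen above. */
        for (i = 0; i < count; i++)
                example_hw_queue(dev, sg_dma_address(&sg[i]),
                                 sg_dma_len(&sg[i]));

        /* ... wait for the transfer to complete, then tear down ... */
        dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
        return 0;
}
#endif
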
/*
 * XXX This file is also used by xenLinux/ia64.
 * "defined(__i386__) || defined(__x86_64__)" means "!defined(__ia64__)".
 * This #if workaround should be removed once this file is merged back into
 * i386's pci-dma or is moved to drivers/xen/core.
 */
#if defined(__i386__) || defined(__x86_64__)
dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        dma_addr_t dma_addr;

        BUG_ON(direction == DMA_NONE);

        if (swiotlb) {
                dma_addr = swiotlb_map_page(
                        dev, page, offset, size, direction);
        } else {
                dma_addr = page_to_bus(page) + offset;
                IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
        }

        return dma_addr;
}
EXPORT_SYMBOL(dma_map_page);

void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        if (swiotlb)
                swiotlb_unmap_page(dev, dma_address, size, direction);
}
EXPORT_SYMBOL(dma_unmap_page);
#endif /* defined(__i386__) || defined(__x86_64__) */

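/*
 * Illustrative sketch: a typical streaming use of dma_map_page() above,
 * e.g. for a page-based fragment handed to a device.  The device pointer
 * and the example_start_dma() helper are hypothetical placeholders.
 */
#if 0
static int example_map_fragment(struct device *dev, struct page *page,
                                unsigned long offset, size_t len)
{
        dma_addr_t bus;

        bus = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
        if (dma_mapping_error(bus))
                return -EIO;

        example_start_dma(dev, bus, len);
        /* ... wait for completion ... */
        dma_unmap_page(dev, bus, len, DMA_TO_DEVICE);
        return 0;
}
#endif
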
int
dma_mapping_error(dma_addr_t dma_addr)
{
        if (swiotlb)
                return swiotlb_dma_mapping_error(dma_addr);
        return 0;
}
EXPORT_SYMBOL(dma_mapping_error);

int
dma_supported(struct device *dev, u64 mask)
{
        if (swiotlb)
                return swiotlb_dma_supported(dev, mask);
        /*
         * By default we'll BUG when an infeasible DMA is requested, and
         * request swiotlb=force (see IOMMU_BUG_ON).
         */
        return 1;
}
EXPORT_SYMBOL(dma_supported);

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        unsigned int order = get_order(size);
        unsigned long vstart;
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                   order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(ret, 0, size);
                        return ret;
                }
                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
                        return NULL;
        }

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;

        vstart = __get_free_pages(gfp, order);
        ret = (void *)vstart;

        if (ret != NULL) {
                /* NB. Hardcode 31 address bits for now: aacraid limitation. */
                if (xen_create_contiguous_region(vstart, order, 31) != 0) {
                        free_pages(vstart, order);
                        return NULL;
                }
                memset(ret, 0, size);
                *dma_handle = virt_to_bus(ret);
        }
        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem && vaddr >= mem->virt_base &&
            vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
        } else {
                xen_destroy_contiguous_region((unsigned long)vaddr, order);
                free_pages((unsigned long)vaddr, order);
        }
}
EXPORT_SYMBOL(dma_free_coherent);

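/*
 * Illustrative sketch: how a driver typically obtains a coherent descriptor
 * ring through dma_alloc_coherent() above.  The ring size and device pointer
 * are hypothetical; note that *bus is a bus/machine address, which under Xen
 * is not the same as virt_to_phys() of the returned pointer.
 */
#if 0
#define EXAMPLE_RING_BYTES 4096

static void *example_alloc_ring(struct device *dev, dma_addr_t *bus)
{
        /* Returns a zeroed, machine-contiguous buffer. */
        void *ring = dma_alloc_coherent(dev, EXAMPLE_RING_BYTES, bus,
                                        GFP_KERNEL);
        if (!ring)
                return NULL;
        /* ... program the hardware with *bus ... */
        return ring;
}

static void example_free_ring(struct device *dev, void *ring, dma_addr_t bus)
{
        dma_free_coherent(dev, EXAMPLE_RING_BYTES, ring, bus);
}
#endif
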
#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = (pages + 31) / 32;

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
        dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;
        memset(dev->dma_mem->bitmap, 0, bitmap_size);

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        kfree(dev->dma_mem->bitmap);
 out:
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pos, err;

        /* Account for the sub-page offset when sizing the region. */
        size += device_addr & ~PAGE_MASK;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */

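/*
 * Illustrative sketch: a driver with a window of device-local memory hooking
 * it into the coherent allocator via the interface above.  The addresses and
 * size used here are hypothetical placeholders.
 */
#if 0
static int example_declare_devmem(struct device *dev)
{
        /* Serve this device's dma_alloc_coherent() calls from a 64KB
         * device-local window at bus address 0xfe000000. */
        if (!dma_declare_coherent_memory(dev, 0xfe000000, 0xfe000000, 0x10000,
                                         DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
                return -ENOMEM;
        return 0;
}

static void example_release_devmem(struct device *dev)
{
        dma_release_declared_memory(dev);
}
#endif
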
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        dma_addr_t dma;

        BUG_ON(direction == DMA_NONE);
        WARN_ON(size == 0);

        if (swiotlb) {
                dma = swiotlb_map_single(dev, ptr, size, direction);
        } else {
                dma = virt_to_bus(ptr);
                IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
                IOMMU_BUG_ON(address_needs_mapping(dev, dma));
        }

        flush_write_buffers();
        return dma;
}
EXPORT_SYMBOL(dma_map_single);

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        if (swiotlb)
                swiotlb_unmap_single(dev, dma_addr, size, direction);
}
EXPORT_SYMBOL(dma_unmap_single);

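/*
 * Illustrative sketch: the usual streaming pattern for dma_map_single()
 * above, including the dma_mapping_error() check.  The device pointer and
 * the example_start_tx() helper are hypothetical placeholders.
 */
#if 0
static int example_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(bus))
                return -EIO;

        example_start_tx(dev, bus, len);
        /* ... wait for the device to finish with the buffer ... */
        dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
        return 0;
}
#endif
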
void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
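
/*
 * Illustrative sketch: re-using one streaming mapping across several
 * transfers, with the sync calls above passing buffer ownership between
 * device and CPU.  The example_consume() helper is a hypothetical
 * placeholder.
 */
#if 0
static void example_poll_rx(struct device *dev, dma_addr_t bus,
                            void *buf, size_t len)
{
        /* Give the buffer back to the CPU before reading what the
         * device wrote into it. */
        dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
        example_consume(buf, len);

        /* Hand ownership back to the device for the next transfer. */
        dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
}
#endif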