fedora core 6 1.2949 + vserver 2.2.0
[linux-2.6.git] arch/i386/kernel/pci-dma-xen.c
/*
 * Dynamic DMA mapping support.
 *
 * On i386 there is no hardware dynamic DMA address translation,
 * so consistent alloc/free are merely page allocation/freeing.
 * The rest of the dynamic DMA mapping interface is implemented
 * in asm/pci.h.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/version.h>
#include <asm/io.h>
#include <xen/balloon.h>
#include <asm/swiotlb.h>
#include <asm/tlbflush.h>
#include <asm-i386/mach-xen/asm/swiotlb.h>
#include <asm/bug.h>

#ifdef __x86_64__
#include <asm/proto.h>
#include <asm/calgary.h>

int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

int iommu_sac_force __read_mostly = 0;
EXPORT_SYMBOL(iommu_sac_force);

int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

void __init pci_iommu_alloc(void)
{
        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
#ifdef CONFIG_IOMMU
        iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
#include <asm/calgary.h>
        /* shut up compiler */
        use_calgary = use_calgary;
        detect_calgary();
#endif

#ifdef CONFIG_SWIOTLB
        pci_swiotlb_init();
#endif
}

static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
        calgary_iommu_init();
#endif

#ifdef CONFIG_IOMMU
        gart_iommu_init();
#endif

        no_iommu_init();
        return 0;
}

/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
#endif

struct dma_coherent_mem {
        void            *virt_base;
        u32             device_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
};

#define IOMMU_BUG_ON(test)                              \
do {                                                    \
        if (unlikely(test)) {                           \
                printk(KERN_ALERT "Fatal DMA error! "   \
                       "Please use 'swiotlb=force'\n"); \
                BUG();                                  \
        }                                               \
} while (0)

int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i, rc;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nents == 0 || sg[0].length == 0);

        if (swiotlb) {
                rc = swiotlb_map_sg(hwdev, sg, nents, direction);
        } else {
                for (i = 0; i < nents; i++) {
                        sg[i].dma_address =
                                page_to_bus(sg[i].page) + sg[i].offset;
                        sg[i].dma_length  = sg[i].length;
                        BUG_ON(!sg[i].page);
                        IOMMU_BUG_ON(address_needs_mapping(
                                hwdev, sg[i].dma_address));
                }
                rc = nents;
        }

        flush_write_buffers();
        return rc;
}
EXPORT_SYMBOL(dma_map_sg);
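
/*
 * Example (editor's sketch, not part of the original file): a hypothetical
 * driver could fill a small scatterlist and map it with dma_map_sg() before
 * handing the bus addresses to its hardware.  The names nic_dev, buf0, buf1,
 * len0, len1 and program_hw_descriptor() are assumptions for illustration
 * only; sg_set_buf() is the scatterlist helper of this kernel era.
 *
 *	struct scatterlist sg[2];
 *	int i, nents;
 *
 *	sg_set_buf(&sg[0], buf0, len0);
 *	sg_set_buf(&sg[1], buf1, len1);
 *	nents = dma_map_sg(&nic_dev->dev, sg, 2, DMA_TO_DEVICE);
 *	for (i = 0; i < nents; i++)
 *		program_hw_descriptor(sg[i].dma_address, sg[i].dma_length);
 *	... wait for the device to finish ...
 *	dma_unmap_sg(&nic_dev->dev, sg, 2, DMA_TO_DEVICE);
 */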

void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (swiotlb)
                swiotlb_unmap_sg(hwdev, sg, nents, direction);
}
EXPORT_SYMBOL(dma_unmap_sg);

/*
 * XXX This file is also used by xenLinux/ia64.
 * "defined(__i386__) || defined(__x86_64__)" means "!defined(__ia64__)".
 * This #if workaround should be removed once this file is merged back into
 * i386's pci-dma or is moved to drivers/xen/core.
 */
#if defined(__i386__) || defined(__x86_64__)
dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        dma_addr_t dma_addr;

        BUG_ON(!valid_dma_direction(direction));

        if (swiotlb) {
                dma_addr = swiotlb_map_page(
                        dev, page, offset, size, direction);
        } else {
                dma_addr = page_to_bus(page) + offset;
                IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
        }

        return dma_addr;
}
EXPORT_SYMBOL(dma_map_page);

void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (swiotlb)
                swiotlb_unmap_page(dev, dma_address, size, direction);
}
EXPORT_SYMBOL(dma_unmap_page);
#endif /* defined(__i386__) || defined(__x86_64__) */
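
/*
 * Example (editor's sketch, not part of the original file): a hypothetical
 * block driver holding a struct page could map part of it for a
 * device-to-memory transfer with dma_map_page().  "blk_dev", "page",
 * "offset", "len" and start_transfer() are assumed names, not taken from
 * this file.
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_page(&blk_dev->dev, page, offset, len, DMA_FROM_DEVICE);
 *	start_transfer(bus, len);
 *	... wait for the device to finish ...
 *	dma_unmap_page(&blk_dev->dev, bus, len, DMA_FROM_DEVICE);
 */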

int
dma_mapping_error(dma_addr_t dma_addr)
{
        if (swiotlb)
                return swiotlb_dma_mapping_error(dma_addr);
        return 0;
}
EXPORT_SYMBOL(dma_mapping_error);

int
dma_supported(struct device *dev, u64 mask)
{
        if (swiotlb)
                return swiotlb_dma_supported(dev, mask);
        /*
         * By default we'll BUG when an infeasible DMA is requested, and
         * request swiotlb=force (see IOMMU_BUG_ON).
         */
        return 1;
}
EXPORT_SYMBOL(dma_supported);
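
/*
 * Example (editor's sketch, not part of the original file): during probe a
 * hypothetical PCI driver would normally negotiate its mask through
 * pci_set_dma_mask(), which ends up consulting dma_supported() above.
 * "pdev" is an assumed struct pci_dev pointer and "mydrv" an assumed
 * driver name.
 *
 *	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
 *		printk(KERN_ERR "mydrv: no usable DMA configuration\n");
 *		return -EIO;
 *	}
 */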

void *dma_alloc_coherent(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        unsigned int order = get_order(size);
        unsigned long vstart;
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                     order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(ret, 0, size);
                        return ret;
                }
                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
                        return NULL;
        }

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;

        vstart = __get_free_pages(gfp, order);
        ret = (void *)vstart;

        if (ret != NULL) {
                if (xen_create_contiguous_region(vstart, order,
                                                 IO_TLB_DMA_BITS) != 0) {
                        free_pages(vstart, order);
                        return NULL;
                }
                memset(ret, 0, size);
                *dma_handle = virt_to_bus(ret);
        }
        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
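
/*
 * Example (editor's sketch, not part of the original file): a hypothetical
 * driver could allocate a descriptor ring that both CPU and device see
 * coherently; on Xen the allocation above additionally makes the underlying
 * machine frames contiguous.  "ring", "ring_dma", RING_BYTES, "ioaddr" and
 * RING_BASE_REG are assumed names for illustration.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_dma,
 *				  GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(ring_dma, ioaddr + RING_BASE_REG);
 *	... use the ring ...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 */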

void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem && vaddr >= mem->virt_base &&
            vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
        } else {
                xen_destroy_contiguous_region((unsigned long)vaddr, order);
                free_pages((unsigned long)vaddr, order);
        }
}
EXPORT_SYMBOL(dma_free_coherent);

#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = (pages + 31)/32;

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        /* bitmap allocation failed: drop the half-initialized pool */
        kfree(dev->dma_mem);
        dev->dma_mem = NULL;
 out:
        if (mem_base)
                iounmap(mem_base);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
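
/*
 * Example (editor's sketch, not part of the original file): a hypothetical
 * platform setup routine could dedicate a device-local memory window to one
 * device, after which dma_alloc_coherent() for that device is satisfied
 * from the declared region first.  The bus address, device address and size
 * below are made-up values for illustration.
 *
 *	if (dma_declare_coherent_memory(&pdev->dev, 0xf8000000, 0x0,
 *					0x10000,
 *					DMA_MEMORY_MAP |
 *					DMA_MEMORY_EXCLUSIVE) == 0)
 *		printk(KERN_WARNING "could not declare coherent memory\n");
 */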

void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int pos, err;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */

dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        dma_addr_t dma;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(size == 0);

        if (swiotlb) {
                dma = swiotlb_map_single(dev, ptr, size, direction);
        } else {
                dma = virt_to_bus(ptr);
                IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
                IOMMU_BUG_ON(address_needs_mapping(dev, dma));
        }

        flush_write_buffers();
        return dma;
}
EXPORT_SYMBOL(dma_map_single);
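
/*
 * Example (editor's sketch, not part of the original file): the usual
 * streaming pattern for a single kmalloc'd buffer, including the
 * dma_mapping_error() check that matters when swiotlb bounce buffers run
 * out.  "dev", "skb_data" and "len" are assumed names.
 *
 *	dma_addr_t bus;
 *
 *	bus = dma_map_single(dev, skb_data, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(bus))
 *		return -ENOMEM;
 *	... tell the device to transmit from bus ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */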

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (swiotlb)
                swiotlb_unmap_single(dev, dma_addr, size, direction);
}
EXPORT_SYMBOL(dma_unmap_single);

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction)
{
        if (swiotlb)
                swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_for_device);