/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm-xen/balloon.h>

/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
               int nents, int direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);
        for (i = 0; i < nents; i++) {
                struct scatterlist *s = &sg[i];

                s->dma_address = virt_to_bus(page_address(s->page) + s->offset);
                s->dma_length = s->length;
        }
        return nents;
}
EXPORT_SYMBOL(dma_map_sg);
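
/*
 * Minimal usage sketch (illustrative only, not part of this file): a driver
 * fills a scatterlist, maps it, and programs the device with the returned
 * address/length pairs.  "mydev", "NENTS" and "program_rx_desc" are
 * hypothetical names.
 *
 *	struct scatterlist sg[NENTS];
 *	int i, count;
 *
 *	count = dma_map_sg(&mydev->dev, sg, NENTS, DMA_FROM_DEVICE);
 *	for (i = 0; i < count; i++)
 *		program_rx_desc(mydev, i, sg_dma_address(&sg[i]),
 *				sg_dma_length(&sg[i]));
 */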
/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                  int nents, int dir)
{
        int i;

        for (i = 0; i < nents; i++) {
                struct scatterlist *s = &sg[i];

                BUG_ON(s->page == NULL);
                BUG_ON(s->dma_address == 0);
                dma_unmap_single(dev, s->dma_address, s->dma_length, dir);
        }
}
EXPORT_SYMBOL(dma_unmap_sg);
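
/*
 * Matching unmap sketch (illustrative only): once the device has finished
 * with the buffers, the same scatterlist, entry count and direction are
 * passed back so the CPU may safely access the data again.
 *
 *	dma_unmap_sg(&mydev->dev, sg, NENTS, DMA_FROM_DEVICE);
 */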
struct dma_coherent_mem {
        void            *virt_base;
        u32             device_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
};
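
/*
 * The structure above describes a per-device coherent pool declared with
 * dma_declare_coherent_memory() below: virt_base is the kernel mapping of
 * the region, device_base the address the device uses for it, size its
 * length in pages, and bitmap tracks which pages are handed out.  For a
 * page allocated at index n, dma_alloc_coherent() returns
 *
 *	cpu address = virt_base   + (n << PAGE_SHIFT)
 *	bus address = device_base + (n << PAGE_SHIFT)
 */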
static void
xen_contig_memory(unsigned long vstart, unsigned int order)
{
        /*
         * Ensure multi-page extents are contiguous in machine memory.
         * This code could be cleaned up some, and the number of
         * hypercalls reduced.
         */
        pgd_t         *pgd;
        pud_t         *pud;
        pmd_t         *pmd;
        pte_t         *pte;
        unsigned long  pfn, i, flags;

        scrub_pages(vstart, 1 << order);

        balloon_lock(flags);

        /* 1. Zap current PTEs, giving away the underlying pages. */
        for (i = 0; i < (1<<order); i++) {
                pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
                pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
                pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
                pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
                pfn = pte->pte >> PAGE_SHIFT;
                xen_l1_entry_update(pte, 0);
                phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
                        (u32)INVALID_P2M_ENTRY;
                if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
                                          &pfn, 1, 0) != 1) BUG();
        }
        /* 2. Get a new contiguous memory extent. */
        if (HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
                                  &pfn, 1, order) != 1) BUG();

        /* 3. Map the new extent in place of old pages. */
        for (i = 0; i < (1<<order); i++) {
                pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
                pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
                pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
                pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
                xen_l1_entry_update(
                        pte, ((pfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL);
                xen_machphys_update(
                        pfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
                phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
                        pfn+i;
        }

        /* Flush updates through and flush the TLB. */
        xen_tlb_flush();

        balloon_unlock(flags);
}
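
/*
 * After the exchange above, the 2^order pseudo-physical pages backing
 * vstart are backed by one machine-contiguous extent, so bus addresses
 * derived from the region increase linearly.  Illustrative check:
 *
 *	for (k = 0; k < (1 << order); k++)
 *		BUG_ON(virt_to_bus((void *)(vstart + k*PAGE_SIZE)) !=
 *		       virt_to_bus((void *)vstart) + k*PAGE_SIZE);
 *
 * This is the property a device doing multi-page DMA to the buffer
 * relies on.
 */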
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, unsigned gfp)
{
        void *ret;
        unsigned int order = get_order(size);
        unsigned long vstart;
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                   order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(ret, 0, size);
                        return ret;
                }
                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
                        return NULL;
        }

        if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
                gfp |= GFP_DMA;

        vstart = __get_free_pages(gfp, order);
        ret = (void *)vstart;
        if (ret != NULL) {
                xen_contig_memory(vstart, order);
                memset(ret, 0, size);
                *dma_handle = virt_to_bus(ret);
        }
        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
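
/*
 * Typical use (illustrative only; "mydev", RING_SIZE and RING_BASE_REG are
 * hypothetical): allocate a machine-contiguous, zeroed buffer, hand the
 * bus address to the device, and free it with the same size and handle.
 *
 *	dma_addr_t ring_bus;
 *	void *ring = dma_alloc_coherent(&mydev->dev, RING_SIZE,
 *					&ring_bus, GFP_KERNEL);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	writel((u32)ring_bus, mydev->regs + RING_BASE_REG);
 *	...
 *	dma_free_coherent(&mydev->dev, RING_SIZE, ring, ring_bus);
 */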
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem && vaddr >= mem->virt_base &&
            vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
        } else
                free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = (pages + 31)/32;

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
        dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;
        memset(dev->dma_mem->bitmap, 0, bitmap_size);

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        kfree(dev->dma_mem);
        dev->dma_mem = NULL;
 out:
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
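
/*
 * Caller sketch (illustrative only; "pdev" and the MY_MEM_* constants are
 * hypothetical): a driver with device-local memory suitable for coherent
 * allocations declares it once at probe time and tears it down with
 * dma_release_declared_memory() on removal.
 *
 *	if (dma_declare_coherent_memory(&pdev->dev, MY_MEM_BUS_ADDR,
 *					MY_MEM_DEV_ADDR, MY_MEM_SIZE,
 *					DMA_MEMORY_MAP) != DMA_MEMORY_MAP)
 *		return -ENODEV;
 */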
void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int pos, err;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
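
/*
 * Sketch (illustrative only; "pdev", MY_FW_DEV_ADDR and MY_FW_SIZE are
 * hypothetical): a driver can pin a fixed device-address window inside the
 * declared region, e.g. one already occupied by firmware, so that later
 * dma_alloc_coherent() calls will not hand it out.
 *
 *	void *fw = dma_mark_declared_memory_occupied(&pdev->dev,
 *						     MY_FW_DEV_ADDR,
 *						     MY_FW_SIZE);
 *	if (IS_ERR(fw))
 *		return PTR_ERR(fw);
 */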