/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-mapping.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
** (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
*/

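/*
** Illustrative usage (not part of this file): drivers reach the ops
** defined below through the generic DMA API, roughly:
**
**	void *cpu_addr;
**	dma_addr_t handle;
**
**	cpu_addr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
**	...
**	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
**
** On PA7100LC/PA7300LC the coherent allocation comes out of the
** uncached pcxl mapping area; on plain PCX it fails (see pcx_dma_ops).
*/
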
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>		/* get_order */
#include <asm/pgalloc.h>
#include <asm/uaccess.h>

#define ASSERT(expr) \
	if(!(expr)) { \
		printk("\n%s:%d: Assertion " #expr " failed!\n", \
			__FILE__, __LINE__); \
		panic(#expr); \
	}

static struct proc_dir_entry * proc_gsc_root = NULL;
static int pcxl_proc_info(char *buffer, char **start, off_t offset, int length);
static unsigned long pcxl_used_bytes = 0;
static unsigned long pcxl_used_pages = 0;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t pcxl_res_lock;
static char *pcxl_res_map;
static int pcxl_res_hint;
static int pcxl_res_size;

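/*
** The resource map is a bitmap over the pcxl DMA mapping area: one bit
** per page, so each map byte tracks 8 pages. pcxl_res_hint remembers
** where the previous allocation ended, so searches usually start there
** rather than rescanning from the front of the map.
*/
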
#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif

static int pa11_dma_supported( struct device *dev, u64 mask)
{
	return 1;
}

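/*
** The map_*_uncached() helpers below walk the kernel page table
** (pgd -> pmd -> pte) and install PAGE_KERNEL_UNC translations for a
** physical range, purging stale TLB entries with pdtlb_kernel() as
** they go. The unmap_*() helpers further down undo the same walk.
*/
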
static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		pdtlb_kernel(orig_vaddr);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}

static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}

static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t page = *pte;

		pte_clear(pte);
		pdtlb_kernel(orig_vaddr);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}

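/*
** Resource map search. PCXL_FIND_FREE_MAPPING() scans the bitmap in
** 8-, 16- or 32-bit chunks (the caller picks the width that covers
** pages_needed) looking for a chunk with all bits clear: first from
** pcxl_res_hint to the end of the map, then wrapping to the start.
*/
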
#define PCXL_SEARCH_LOOP(idx, mask, size)  \
	for(; res_ptr < res_end; ++res_ptr) \
	{ \
		if(0 == ((*res_ptr) & mask)) { \
			*res_ptr |= mask; \
			idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
			pcxl_res_hint = idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
	res_ptr = (u##size *)&pcxl_res_map[0]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
}

unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	ASSERT(pages_needed);
	ASSERT((pages_needed * PAGE_SIZE) < DMA_CHUNK_SIZE);
	ASSERT(pages_needed < (BITS_PER_LONG - PAGE_SHIFT));

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}

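/*
** Address math: res_idx is a byte offset into the resource map and
** each map byte covers 8 pages, so the mapping's vaddr is
** pcxl_dma_start + res_idx * 8 * PAGE_SIZE, i.e. res_idx shifted left
** by PAGE_SHIFT + 3 (with 4k pages, res_idx 2 -> base + 0x10000).
** pcxl_free_range() below inverts the same calculation.
*/
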
#define PCXL_FREE_MAPPINGS(idx, m, size) \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
	ASSERT((*res_ptr & m) == m); \
	*res_ptr &= ~m;

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	ASSERT(pages_mapped);
	ASSERT((pages_mapped * PAGE_SIZE) < DMA_CHUNK_SIZE);
	ASSERT(pages_mapped < (BITS_PER_LONG - PAGE_SHIFT));

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}

static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	spin_lock_init(&pcxl_res_lock);
	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", 0);
	create_proc_info_entry("dino", 0, proc_gsc_root, pcxl_proc_info);
	return 0;
}

__initcall(pcxl_dma_init);

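/*
** Consistent memory strategy: allocate ordinary physical pages, flush
** them out of the data cache, then alias them uncached inside the pcxl
** mapping area. The device is handed the physical address while the
** CPU uses the uncached virtual alias, which is what makes the memory
** "consistent" without an IOMMU.
*/
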
static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, int flag)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(flag, order);
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
	if (!dev || *dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;
#endif
	return (void *)vaddr;
}

static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);
	free_pages((unsigned long)__va(dma_handle), order);
}

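/*
** Streaming mappings: with no IOMMU, mapping a buffer is nothing more
** than virt_to_phys() plus whatever cache flushing is needed to make
** the data visible to (or from) the device.
*/
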
static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE) {
		printk(KERN_ERR "pa11_dma_map_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
		BUG();
	}

	flush_kernel_dcache_range((unsigned long) addr, size);
	return virt_to_phys(addr);
}

static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE) {
		printk(KERN_ERR "pa11_dma_unmap_single(PCI_DMA_NONE) called by %p\n", __builtin_return_address(0));
		BUG();
	}

	if (direction == DMA_TO_DEVICE)
		return;

	/*
	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
	 * simple map/unmap case. However, it IS necessary if
	 * pci_dma_sync_single_* has been called and the buffer reused.
	 */

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
}

static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	if (direction == DMA_NONE)
		BUG();

	for (i = 0; i < nents; i++, sglist++ ) {
		unsigned long vaddr = sg_virt_addr(sglist);
		sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
		sg_dma_len(sglist) = sglist->length;
		flush_kernel_dcache_range(vaddr, sglist->length);
	}

	return nents;
}

static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	if (direction == DMA_NONE)
		BUG();

	if (direction == DMA_TO_DEVICE)
		return;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}

static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		BUG();

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}

static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		BUG();

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}

static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}

static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}

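/*
** Both the *_for_cpu and *_for_device sync hooks issue the same
** flush_kernel_dcache_range(). My understanding is that the PA data
** cache flush both writes back and invalidates, so a single primitive
** covers both DMA directions.
*/
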
struct hppa_dma_ops pcxl_dma_ops = {
	.dma_supported = pa11_dma_supported,
	.alloc_consistent = pa11_dma_alloc_consistent,
	.alloc_noncoherent = pa11_dma_alloc_consistent,
	.free_consistent = pa11_dma_free_consistent,
	.map_single = pa11_dma_map_single,
	.unmap_single = pa11_dma_unmap_single,
	.map_sg = pa11_dma_map_sg,
	.unmap_sg = pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device = pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
};

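/*
** pcxl_dma_ops serves PA7100LC/PA7300LC, which can map memory uncached.
** Plain PCX machines cannot, so pcx_dma_ops below fails all consistent
** allocations and offers only noncoherent (kmalloc-backed) memory that
** drivers must sync explicitly.
*/
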
static void *fail_alloc_consistent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, int flag)
{
	return NULL;
}

static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, int flag)
{
	void *addr = NULL;

	/* rely on kmalloc to be cacheline aligned */
	addr = kmalloc(size, flag);
	if (addr)
		*dma_handle = (dma_addr_t)virt_to_phys(addr);

	return addr;
}

static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
			void *vaddr, dma_addr_t iova)
{
	kfree(vaddr);
}

struct hppa_dma_ops pcx_dma_ops = {
	.dma_supported = pa11_dma_supported,
	.alloc_consistent = fail_alloc_consistent,
	.alloc_noncoherent = pa11_dma_alloc_noncoherent,
	.free_consistent = pa11_dma_free_noncoherent,
	.map_single = pa11_dma_map_single,
	.unmap_single = pa11_dma_unmap_single,
	.map_sg = pa11_dma_map_sg,
	.unmap_sg = pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device = pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
};

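/*
** /proc support: pcxl_dma_init() registered this handler under
** /proc/gsc/dino. It reports the mapping-area size, resource-map usage
** and a hex dump of the map itself.
*/
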
static int pcxl_proc_info(char *buf, char **start, off_t offset, int len)
{
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */

	sprintf(buf, "\nDMA Mapping Area size    : %d bytes (%d pages)\n",
		PCXL_DMA_MAP_SIZE,
		(pcxl_res_size << 3)); /* 1 bit per page */

	sprintf(buf, "%sResource bitmap : %d bytes (%d pages)\n",
		buf, pcxl_res_size, pcxl_res_size << 3);   /* 8 bits per byte */

	strcat(buf,  "          total:    free:    used:   % used:\n");
	sprintf(buf, "%sblocks  %8d %8ld %8ld %8ld%%\n", buf, pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	sprintf(buf, "%spages   %8ld %8ld %8ld %8ld%%\n", buf, total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

	strcat(buf, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			strcat(buf,"\n   ");
		sprintf(buf, "%s %08lx", buf, *res_ptr);
	}

	strcat(buf, "\n");
	return strlen(buf);
}