/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top, and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that, such as
 *  discontiguous DMA windows, will require custom implementations
 *  that reserve memory areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  version 2 as published by the Free Software Foundation.
 */
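
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * platform with a restricted DMA window registers each affected device
 * during board setup, sizing the two pools for its typical transfers,
 * e.g.
 *
 *	if (dmabounce_register_dev(dev, 512, 4096))
 *		dev_err(dev, "could not register with dmabounce\n");
 *
 * The pool sizes (512/4096) are made-up values.  The platform must also
 * supply dma_needs_bounce(dev, dma_addr, size), which map_single() below
 * calls to decide whether a given bus address falls outside the device's
 * DMA window.
 */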

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dma_pool	*pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_device_info {
	struct list_head node;

	struct device *dev;
	struct dma_pool *small_buffer_pool;
	struct dma_pool *large_buffer_pool;
	struct list_head safe_buffers;
	unsigned long small_buffer_size, large_buffer_size;

	/* statistics */
	unsigned long sbp_allocs;
	unsigned long lbp_allocs;
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
};

static LIST_HEAD(dmabounce_devs);

static void print_alloc_stats(struct dmabounce_device_info *device_info)
{
	printk(KERN_INFO
		"%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
		device_info->dev->bus_id,
		device_info->sbp_allocs, device_info->lbp_allocs,
		device_info->total_allocs - device_info->sbp_allocs -
			device_info->lbp_allocs,
		device_info->total_allocs);
}

/* find the given device in the dmabounce device list */
static inline struct dmabounce_device_info *
find_dmabounce_dev(struct device *dev)
{
	struct list_head *entry;

	list_for_each(entry, &dmabounce_devs) {
		struct dmabounce_device_info *d =
			list_entry(entry, struct dmabounce_device_info, node);

		if (d->dev == dev)
			return d;
	}
	return NULL;
}

/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dma_pool *pool;
	struct device *dev = device_info->dev;
	void *safe;
	dma_addr_t safe_dma_addr;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	DO_STATS ( device_info->total_allocs++ );

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	if (size <= device_info->small_buffer_size) {
		pool = device_info->small_buffer_pool;
		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);

		DO_STATS ( device_info->sbp_allocs++ );
	} else if (size <= device_info->large_buffer_size) {
		pool = device_info->large_buffer_pool;
		safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr);

		DO_STATS ( device_info->lbp_allocs++ );
	} else {
		pool = NULL;
		safe = dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC);
	}

	if (safe == NULL) {
		dev_warn(device_info->dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

	if (device_info->total_allocs % 1000 == 0)
		print_alloc_stats(device_info);

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;
	buf->safe = safe;
	buf->safe_dma_addr = safe_dma_addr;

	list_add(&buf->node, &device_info->safe_buffers);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct list_head *entry;

	list_for_each(entry, &device_info->safe_buffers) {
		struct safe_buffer *b =
			list_entry(entry, struct safe_buffer, node);

		if (b->safe_dma_addr == safe_dma_addr)
			return b;
	}
	return NULL;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	list_del(&buf->node);

	if (buf->pool)
		dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static void print_map_stats(struct dmabounce_device_info *device_info)
{
	printk(KERN_INFO
		"%s: dmabounce: map_op_count=%lu, bounce_count=%lu\n",
		device_info->dev->bus_id,
		device_info->map_op_count, device_info->bounce_count);
}

static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
	   enum dma_data_direction dir)
{
	dma_addr_t dma_addr;
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	if (dev->dma_mask) {
		unsigned long limit;

		limit = (*dev->dma_mask + 1) & ~(*dev->dma_mask);
		if (limit && (size > limit)) {
			dev_err(dev, "DMA mapping too big "
				"(requested %#x mask %#Lx)\n",
				size, *dev->dma_mask);
			return ~0;
		}
	}

	dma_addr = virt_to_bus(ptr);

	if (device_info && dma_needs_bounce(dev, dma_addr, size)) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (buf == NULL) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return 0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_bus(buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		consistent_sync(buf->safe, size, dir);

		dma_addr = buf->safe_dma_addr;
	} else {
		consistent_sync(ptr, size, dir);
	}

	return dma_addr;
}
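
/*
 * Note: dma_needs_bounce() is not defined in this file; each platform that
 * uses dmabounce supplies its own.  A minimal sketch (hypothetical, assuming
 * a device that can only reach the first 64MB of bus space) might look like:
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > (64 << 20);
 *	}
 */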

static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	     enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	struct safe_buffer *buf = NULL;

	/*
	 * Trying to unmap an invalid mapping
	 */
	if (dma_addr == ~0) {
		dev_err(dev, "Trying to unmap invalid mapping\n");
		return;
	}

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		BUG_ON(buf->size != size);

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_bus(buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		if ((dir == DMA_FROM_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);
		}
		free_safe_buffer(device_info, buf);
	}
}

static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	    enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
	struct safe_buffer *buf = NULL;

	if (device_info)
		buf = find_safe_buffer(device_info, dma_addr);

	if (buf) {
		/*
		 * Both of these checks from the original code need to be
		 * commented out because some drivers rely on the following:
		 *
		 * 1) Drivers may map a large chunk of memory into DMA space
		 *    but only sync a small portion of it.  A good example is
		 *    allocating a large buffer, mapping it, and then
		 *    breaking it up into small descriptors.  There is no
		 *    point in syncing the whole buffer if you only have to
		 *    touch one descriptor.
		 *
		 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
		 *    usually only synced in one direction at a time.
		 *
		 * See drivers/net/eepro100.c for examples of both cases.
		 *
		 * BUG_ON(buf->size != size);
		 * BUG_ON(buf->direction != dir);
		 */

		dev_dbg(dev,
			"%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
			__func__, buf->ptr, (void *) virt_to_bus(buf->ptr),
			buf->safe, (void *) buf->safe_dma_addr);

		DO_STATS ( device_info->bounce_count++ );

		switch (dir) {
		case DMA_FROM_DEVICE:
			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, buf->ptr, size);
			memcpy(buf->ptr, buf->safe, size);
			break;
		case DMA_TO_DEVICE:
			dev_dbg(dev,
				"%s: copy out unsafe %p to safe %p, size %d\n",
				__func__, buf->ptr, buf->safe, size);
			memcpy(buf->safe, buf->ptr, size);
			break;
		case DMA_BIDIRECTIONAL:
			BUG();	/* is this allowed?  what does it mean? */
		default:
			BUG();
		}
		consistent_sync(buf->safe, size, dir);
	} else {
		consistent_sync(bus_to_virt(dma_addr), size, dir);
	}
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long flags;
	dma_addr_t dma_addr;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);
	dma_addr = map_single(dev, ptr, size, dir);
	local_irq_restore(flags);

	return dma_addr;
}

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);
	unmap_single(dev, dma_addr, size, dir);
	local_irq_restore(flags);
}
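
/*
 * Driver-side sketch (illustrative only): a driver on a dmabounce-managed
 * bus uses the ordinary streaming DMA API and never sees the bounce buffer.
 * The names below (my_dev, my_buf, BUF_LEN) are hypothetical:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(my_dev, my_buf, BUF_LEN, DMA_TO_DEVICE);
 *	(point the hardware at 'handle' and run the transfer)
 *	dma_unmap_single(my_dev, handle, BUF_LEN, DMA_TO_DEVICE);
 *
 * If my_buf lies outside the DMA window, the calls above transparently
 * copy through a safe buffer allocated by alloc_safe_buffer().
 */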

int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		struct page *page = sg->page;
		unsigned int offset = sg->offset;
		unsigned int length = sg->length;
		void *ptr = page_address(page) + offset;

		sg->dma_address =
			map_single(dev, ptr, length, dir);
	}

	local_irq_restore(flags);

	return nents;
}

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		unmap_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
			enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	local_irq_save(flags);
	sync_single(dev, dma_addr, size, dir);
	local_irq_restore(flags);
}

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
			enum dma_data_direction dir)
{
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	local_irq_save(flags);
	sync_single(dev, dma_addr, size, dir);
	local_irq_restore(flags);
}

void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}

void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	unsigned long flags;
	int i;

	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
		__func__, sg, nents, dir);

	BUG_ON(dir == DMA_NONE);

	local_irq_save(flags);

	for (i = 0; i < nents; i++, sg++) {
		dma_addr_t dma_addr = sg->dma_address;
		unsigned int length = sg->length;

		sync_single(dev, dma_addr, length, dir);
	}

	local_irq_restore(flags);
}

int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
			unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		printk(KERN_ERR
			"Could not allocate dmabounce_device_info for %s",
			dev->bus_id);
		return -ENOMEM;
	}

	device_info->small_buffer_pool =
		dma_pool_create("small_dmabounce_pool",
				dev,
				small_buffer_size,
				0 /* byte alignment */,
				0 /* no page-crossing issues */);
	if (!device_info->small_buffer_pool) {
		printk(KERN_ERR
			"dmabounce: could not allocate small DMA pool for %s\n",
			dev->bus_id);
		kfree(device_info);
		return -ENOMEM;
	}

	device_info->large_buffer_pool = NULL;
	if (large_buffer_size) {
		device_info->large_buffer_pool =
			dma_pool_create("large_dmabounce_pool",
					dev,
					large_buffer_size,
					0 /* byte alignment */,
					0 /* no page-crossing issues */);
		if (!device_info->large_buffer_pool) {
			printk(KERN_ERR
				"dmabounce: could not allocate large DMA pool for %s\n",
				dev->bus_id);
			dma_pool_destroy(device_info->small_buffer_pool);
			kfree(device_info);
			return -ENOMEM;
		}
	}

	device_info->dev = dev;
	device_info->small_buffer_size = small_buffer_size;
	device_info->large_buffer_size = large_buffer_size;
	INIT_LIST_HEAD(&device_info->safe_buffers);

	device_info->sbp_allocs = 0;
	device_info->lbp_allocs = 0;
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;

	list_add(&device_info->node, &dmabounce_devs);

	printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
		dev->bus_id, dev->bus->name);

	return 0;
}

void
dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);

	if (!device_info) {
		printk(KERN_WARNING
			"%s: Never registered with dmabounce but attempting "
			"to unregister!\n", dev->bus_id);
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		printk(KERN_ERR
			"%s: Removing from dmabounce with pending buffers!\n",
			dev->bus_id);
		BUG();
	}

	if (device_info->small_buffer_pool)
		dma_pool_destroy(device_info->small_buffer_pool);
	if (device_info->large_buffer_pool)
		dma_pool_destroy(device_info->large_buffer_pool);

	print_alloc_stats(device_info);
	print_map_stats(device_info);

	list_del(&device_info->node);

	kfree(device_info);

	printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
		dev->bus_id, dev->bus->name);
}

EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
EXPORT_SYMBOL(dma_sync_single_for_cpu);
EXPORT_SYMBOL(dma_sync_single_for_device);
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
EXPORT_SYMBOL(dma_sync_sg_for_device);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");