/*
 * Merge to Fedora kernel-2.6.18-1.2255_FC5-vs2.0.2.2-rc9, patched with the
 * stable patch series.
 * [linux-2.6.git] include/asm-ia64/dma-mapping.h
 */
#ifndef _ASM_IA64_DMA_MAPPING_H
#define _ASM_IA64_DMA_MAPPING_H

/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <asm/machvec.h>
#ifndef CONFIG_XEN

/*
 * Native (non-Xen) build: every DMA-mapping entry point is routed through
 * the ia64 machine vector, so each platform supplies its own implementation
 * behind the platform_* indirection.
 */
#define dma_alloc_coherent      platform_dma_alloc_coherent
#define dma_alloc_noncoherent   platform_dma_alloc_coherent     /* coherent mem. is cheap */
#define dma_free_coherent       platform_dma_free_coherent
#define dma_free_noncoherent    platform_dma_free_coherent
#define dma_map_single          platform_dma_map_single
#define dma_map_sg              platform_dma_map_sg
#define dma_unmap_single        platform_dma_unmap_single
#define dma_unmap_sg            platform_dma_unmap_sg
#define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu
#define dma_sync_sg_for_cpu     platform_dma_sync_sg_for_cpu
#define dma_sync_single_for_device platform_dma_sync_single_for_device
#define dma_sync_sg_for_device  platform_dma_sync_sg_for_device
#define dma_mapping_error       platform_dma_mapping_error

#else /* CONFIG_XEN */
/* Needed for arch/i386/kernel/swiotlb.c and arch/i386/kernel/pci-dma-xen.c */
#include <asm/hypervisor.h>
/* Needed for arch/i386/kernel/swiotlb.c */
#include <asm-i386/mach-xen/asm/swiotlb.h>

/*
 * Xen build: the DMA-mapping entry points are real functions (implemented
 * by the shared i386 swiotlb / pci-dma-xen code) instead of machvec
 * macros, so declare their prototypes here.
 */
int dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
               enum dma_data_direction direction);
void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
                  enum dma_data_direction direction);
int dma_supported(struct device *dev, u64 mask);
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                       dma_addr_t dma_handle);
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
                          enum dma_data_direction direction);
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                      enum dma_data_direction direction);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                             size_t size, enum dma_data_direction direction);
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                                size_t size,
                                enum dma_data_direction direction);
int dma_mapping_error(dma_addr_t dma_addr);

51 #define flush_write_buffers()   do { } while (0)
52 static inline void
53 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
54                     enum dma_data_direction direction)
55 {
56         if (swiotlb)
57                 swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
58         flush_write_buffers();
59 }
60
61 static inline void
62 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
63                        enum dma_data_direction direction)
64 {
65         if (swiotlb)
66                 swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
67         flush_write_buffers();
68 }
#endif /* CONFIG_XEN */

/*
 * Page-level helpers: expressed via the single-buffer primitives using
 * page_address(), i.e. this assumes the page has a kernel virtual address.
 */
#define dma_map_page(dev, pg, off, size, dir)                           \
        dma_map_single(dev, page_address(pg) + (off), (size), (dir))
#define dma_unmap_page(dev, dma_addr, size, dir)                        \
        dma_unmap_single(dev, dma_addr, size, dir)

/*
 * Rest of this file is part of the "Advanced DMA API".  Use at your own risk.
 * See Documentation/DMA-API.txt for details.
 */

/*
 * Partial-range syncs: the offset argument is accepted but not forwarded,
 * so the whole-buffer sync primitive is used regardless of offset.
 */
#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir)       \
        dma_sync_single_for_cpu(dev, dma_handle, size, dir)
#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir)    \
        dma_sync_single_for_device(dev, dma_handle, size, dir)

/* Non-Xen builds get dma_supported() from the machine vector as well. */
#ifndef CONFIG_XEN
#define dma_supported           platform_dma_supported
#endif

90 static inline int
91 dma_set_mask (struct device *dev, u64 mask)
92 {
93         if (!dev->dma_mask || !dma_supported(dev, mask))
94                 return -EIO;
95         *dev->dma_mask = mask;
96         return 0;
97 }
98
extern int dma_get_cache_alignment(void);

/* Flush/invalidate a CPU-written buffer before DMA; ordering-only on ia64. */
static inline void
dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
{
	/*
	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
	 * ensure that dma_cache_sync() enforces order, hence the mb().
	 */
	mb();
}

/* Coherent memory is all this API hands out, so consistency always holds. */
#define dma_is_consistent(dma_handle)   (1)     /* all we do is coherent memory... */
112
#ifdef CONFIG_XEN
/* arch/i386/kernel/swiotlb.o requires */
void contiguous_bitmap_init(unsigned long end_pfn);

117 static inline int
118 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
119 {
120         dma_addr_t mask = DMA_64BIT_MASK;
121         /* If the device has a mask, use it, otherwise default to 64 bits */
122         if (hwdev && hwdev->dma_mask)
123                 mask = *hwdev->dma_mask;
124         return (addr & ~mask) != 0;
125 }
#else
/* Non-Xen builds track no contiguity; make the init call a typed no-op. */
#define contiguous_bitmap_init(end_pfn) ((void)end_pfn)
#endif
129
130 static inline int
131 range_straddles_page_boundary(void *p, size_t size)
132 {
133         extern unsigned long *contiguous_bitmap;
134         return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
135                 !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
136 }
137
138 #endif /* _ASM_IA64_DMA_MAPPING_H */