/*
 * From the Fedora Core 6 (1.2949) + vserver 2.2.0 kernel tree:
 * include/asm-i386/mach-xen/asm/dma-mapping.h
 */
1 #ifndef _ASM_I386_DMA_MAPPING_H
2 #define _ASM_I386_DMA_MAPPING_H
3
4 #include <linux/mm.h>
5
6 #include <asm/cache.h>
7 #include <asm/io.h>
8 #include <asm/scatterlist.h>
9 #include <asm/bug.h>
10 #include <asm/swiotlb.h>
11
12 static inline int
13 address_needs_mapping(struct device *hwdev, dma_addr_t addr)
14 {
15         dma_addr_t mask = 0xffffffff;
16         /* If the device has a mask, use it, otherwise default to 32 bits */
17         if (hwdev && hwdev->dma_mask)
18                 mask = *hwdev->dma_mask;
19         return (addr & ~mask) != 0;
20 }
21
22 static inline int
23 range_straddles_page_boundary(void *p, size_t size)
24 {
25         extern unsigned long *contiguous_bitmap;
26         return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
27                 !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
28 }
29
/* On this port the non-coherent variants are aliases of the coherent ones. */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

/*
 * Allocate a consistent DMA buffer of @size bytes for @dev; the bus
 * address is returned through @dma_handle. Implemented out of line.
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flag);

/* Release a buffer obtained from dma_alloc_coherent(). */
void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle);
38
/* Map @size bytes at CPU address @ptr for DMA; returns the bus address. */
extern dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction);

/* Tear down a mapping created by dma_map_single(). */
extern void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction);

/*
 * Map a scatter/gather list of @nents entries; returns the number of
 * entries actually mapped (0 on failure, per the DMA API convention).
 */
extern int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction);

/* Map @size bytes of @page starting at @offset; returns the bus address. */
extern dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction);

/* Tear down a mapping created by dma_map_page(). */
extern void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction);

/* Tear down a mapping created by dma_map_sg(). */
extern void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction);

/* Make device writes to the mapped range visible to the CPU. */
extern void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction direction);

/* Hand the mapped range back to the device after CPU access. */
extern void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
                           enum dma_data_direction direction);
70
71 static inline void
72 dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
73                               unsigned long offset, size_t size,
74                               enum dma_data_direction direction)
75 {
76         dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
77 }
78
79 static inline void
80 dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
81                                  unsigned long offset, size_t size,
82                                  enum dma_data_direction direction)
83 {
84         dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
85 }
86
87 static inline void
88 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
89                     enum dma_data_direction direction)
90 {
91         if (swiotlb)
92                 swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
93         flush_write_buffers();
94 }
95
96 static inline void
97 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
98                     enum dma_data_direction direction)
99 {
100         if (swiotlb)
101                 swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
102         flush_write_buffers();
103 }
104
/* Nonzero when @dma_addr is the implementation's mapping-failure value. */
extern int
dma_mapping_error(dma_addr_t dma_addr);

/* Nonzero when DMA with the given address @mask is supported for @dev. */
extern int
dma_supported(struct device *dev, u64 mask);
110
111 static inline int
112 dma_set_mask(struct device *dev, u64 mask)
113 {
114         if(!dev->dma_mask || !dma_supported(dev, mask))
115                 return -EIO;
116
117         *dev->dma_mask = mask;
118
119         return 0;
120 }
121
122 static inline int
123 dma_get_cache_alignment(void)
124 {
125         /* no easy way to get cache size on all x86, so return the
126          * maximum possible, to be safe */
127         return (1 << INTERNODE_CACHE_SHIFT);
128 }
129
/* Memory reachable through this API is always DMA-consistent here. */
#define dma_is_consistent(d, h) (1)

/*
 * No explicit cache maintenance is required on this platform; draining
 * the CPU write buffers is sufficient before the device observes memory.
 */
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        flush_write_buffers();
}
138
/* This architecture provides the dma_declare_coherent_memory() family. */
#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
/*
 * Declare a memory region at @bus_addr/@device_addr of @size bytes to
 * back coherent allocations for @dev; @flags selects behaviour.
 */
extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags);

/* Release the region previously declared for @dev. */
extern void
dma_release_declared_memory(struct device *dev);

/* Reserve part of the declared region; returns its CPU virtual address. */
extern void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size);
150
151 #endif